code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Any = "openai/whisper-base"
_SCREAMING_SNAKE_CASE : List[Any] = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
_SCREAMING_SNAKE_CASE : Tuple = "transcriber"
_SCREAMING_SNAKE_CASE : Dict = WhisperProcessor
_SCREAMING_SNAKE_CASE : Optional[int] = WhisperForConditionalGeneration
_SCREAMING_SNAKE_CASE : Dict = ["audio"]
_SCREAMING_SNAKE_CASE : List[Any] = ["text"]
def UpperCAmelCase__ ( self : List[str] , A__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return self.pre_processor(A__ , return_tensors="pt" ).input_features
def UpperCAmelCase__ ( self : Tuple , A__ : str ) -> str:
'''simple docstring'''
return self.model.generate(inputs=A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : str ) -> Optional[Any]:
'''simple docstring'''
return self.pre_processor.batch_decode(A__ , skip_special_tokens=A__ )[0]
| 666 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = ["pixel_values"]
def __init__( self : Union[str, Any] , A__ : bool = True , A__ : Dict[str, int] = None , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = True , **A__ : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**A__ )
snake_case_ : str = size if size is not None else {"shortest_edge": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(A__ , default_to_square=A__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ , param_name="crop_size" )
snake_case_ : str = do_resize
snake_case_ : str = size
snake_case_ : Optional[Any] = resample
snake_case_ : Any = do_center_crop
snake_case_ : Any = crop_size
snake_case_ : str = do_rescale
snake_case_ : Optional[Any] = rescale_factor
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case_ : int = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : str = get_size_dict(A__ , default_to_square=A__ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
snake_case_ : str = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[Any] , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[int] = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Optional[Any] , A__ : np.ndarray , A__ : Union[int, float] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> str:
'''simple docstring'''
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Any , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : List[Any] , A__ : ImageInput , A__ : bool = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : int = None , A__ : bool = None , A__ : float = None , A__ : bool = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **A__ : Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
snake_case_ : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case_ : Union[str, Any] = size if size is not None else self.size
snake_case_ : Any = get_size_dict(A__ , param_name="size" , default_to_square=A__ )
snake_case_ : Optional[int] = resample if resample is not None else self.resample
snake_case_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : List[str] = crop_size if crop_size is not None else self.crop_size
snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" , default_to_square=A__ )
snake_case_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
snake_case_ : List[str] = image_std if image_std is not None else self.image_std
snake_case_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ : List[Any] = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ : Dict = [convert_to_rgb(A__ ) for image in images]
# All transformations expect numpy arrays.
snake_case_ : Dict = [to_numpy_array(A__ ) for image in images]
if do_resize:
snake_case_ : Dict = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_center_crop:
snake_case_ : Tuple = [self.center_crop(image=A__ , size=A__ ) for image in images]
if do_rescale:
snake_case_ : str = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
snake_case_ : int = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
snake_case_ : Tuple = {"pixel_values": images}
return BatchFeature(data=A__ , tensor_type=A__ )
| 666 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = "xlm-roberta"
def __init__( self : Tuple , A__ : List[str]=3_05_22 , A__ : str=7_68 , A__ : int=12 , A__ : Any=12 , A__ : int=30_72 , A__ : Any="gelu" , A__ : Optional[int]=0.1 , A__ : int=0.1 , A__ : Optional[Any]=5_12 , A__ : Dict=2 , A__ : Any=0.02 , A__ : Dict=1E-12 , A__ : List[str]=1 , A__ : Any=0 , A__ : Dict=2 , A__ : List[Any]="absolute" , A__ : Dict=True , A__ : Tuple=None , **A__ : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Tuple = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : int = hidden_act
snake_case_ : Optional[int] = intermediate_size
snake_case_ : int = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : Tuple = type_vocab_size
snake_case_ : str = initializer_range
snake_case_ : List[Any] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : int = use_cache
snake_case_ : List[str] = classifier_dropout
class snake_case__ ( _UpperCamelCase ):
@property
def UpperCAmelCase__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
snake_case_ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case_ : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 666 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: tuple[int, int] , lowerCAmelCase_: int ):
snake_case_ ,snake_case_ : Dict = position
snake_case_ : int = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
snake_case_ : Union[str, Any] = []
for position in positions:
snake_case_ ,snake_case_ : Union[str, Any] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(lowerCAmelCase_ )
return permissible_positions
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: list[list[int]] ):
return not any(elem == 0 for row in board for elem in row )
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: list[list[int]] , lowerCAmelCase_: tuple[int, int] , lowerCAmelCase_: int ):
if is_complete(lowerCAmelCase_ ):
return True
for position in get_valid_pos(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
snake_case_ ,snake_case_ : Dict = position
if board[y][x] == 0:
snake_case_ : List[str] = curr + 1
if open_knight_tour_helper(lowerCAmelCase_ , lowerCAmelCase_ , curr + 1 ):
return True
snake_case_ : Dict = 0
return False
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
snake_case_ : Any = [[0 for i in range(lowerCAmelCase_ )] for j in range(lowerCAmelCase_ )]
for i in range(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ ):
snake_case_ : Optional[Any] = 1
if open_knight_tour_helper(lowerCAmelCase_ , (i, j) , 1 ):
return board
snake_case_ : Dict = 0
snake_case_ : str = f"Open Kight Tour cannot be performed on a board of size {n}"
raise ValueError(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: dict , lowerCAmelCase_: str , lowerCAmelCase_: set , lowerCAmelCase_: set , lowerCAmelCase_: dict , lowerCAmelCase_: dict , lowerCAmelCase_: PriorityQueue , lowerCAmelCase_: dict , lowerCAmelCase_: float | int , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case_ : str = cst_fwd.get(lowerCAmelCase_ , np.inf )
snake_case_ : Dict = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case_ : int = new_cost_f
snake_case_ : Optional[int] = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case_ : Optional[Any] = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: str , lowerCAmelCase_: str , lowerCAmelCase_: dict , lowerCAmelCase_: dict ):
snake_case_ : Dict = -1
snake_case_ : str = set()
snake_case_ : List[Any] = set()
snake_case_ : Dict = {source: 0}
snake_case_ : Optional[int] = {destination: 0}
snake_case_ : str = {source: None}
snake_case_ : Tuple = {destination: None}
snake_case_ : PriorityQueue[Any] = PriorityQueue()
snake_case_ : PriorityQueue[Any] = PriorityQueue()
snake_case_ : Union[str, Any] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case_ ,snake_case_ : str = queue_forward.get()
visited_forward.add(lowerCAmelCase_ )
snake_case_ ,snake_case_ : List[Any] = queue_backward.get()
visited_backward.add(lowerCAmelCase_ )
snake_case_ : Union[str, Any] = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
snake_case_ : List[str] = pass_and_relaxation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case_ : Optional[Any] = shortest_distance
return shortest_path_distance
UpperCAmelCase = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
UpperCAmelCase = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | from ...configuration_utils import PretrainedConfig
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = "bert-generation"
def __init__( self : Optional[int] , A__ : List[Any]=5_03_58 , A__ : Any=10_24 , A__ : Any=24 , A__ : List[Any]=16 , A__ : List[Any]=40_96 , A__ : int="gelu" , A__ : List[str]=0.1 , A__ : List[str]=0.1 , A__ : str=5_12 , A__ : int=0.02 , A__ : Any=1E-12 , A__ : Optional[Any]=0 , A__ : List[str]=2 , A__ : Optional[int]=1 , A__ : str="absolute" , A__ : Any=True , **A__ : Optional[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
snake_case_ : str = vocab_size
snake_case_ : int = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = hidden_act
snake_case_ : Tuple = intermediate_size
snake_case_ : str = hidden_dropout_prob
snake_case_ : Optional[Any] = attention_probs_dropout_prob
snake_case_ : str = max_position_embeddings
snake_case_ : Optional[Any] = initializer_range
snake_case_ : Optional[int] = layer_norm_eps
snake_case_ : str = position_embedding_type
snake_case_ : Dict = use_cache
| 666 | 1 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Tuple ):
class snake_case__ :
def __init__( self : Any , A__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = metric_id
class snake_case__ :
_SCREAMING_SNAKE_CASE : List[str] = [MetricMock(_UpperCamelCase ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Tuple , lowerCAmelCase_: int , lowerCAmelCase_: List[Any] , lowerCAmelCase_: Any , lowerCAmelCase_: List[str] ):
if "tmp_path" in args:
snake_case_ : List[Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(lowerCAmelCase_ , match="https://huggingface.co/docs/evaluate" ):
func(*lowerCAmelCase_ )
| 666 | import math
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
snake_case_ : Any = []
snake_case_ : List[str] = 2
snake_case_ : Optional[int] = int(math.sqrt(lowerCAmelCase_ ) ) # Size of every segment
snake_case_ : str = [True] * (end + 1)
snake_case_ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase_ )
for i in range(start * start , end + 1 , lowerCAmelCase_ ):
snake_case_ : Union[str, Any] = False
start += 1
prime += in_prime
snake_case_ : Dict = end + 1
snake_case_ : Dict = min(2 * end , lowerCAmelCase_ )
while low <= n:
snake_case_ : Any = [True] * (high - low + 1)
for each in in_prime:
snake_case_ : Optional[Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase_ , high + 1 , lowerCAmelCase_ ):
snake_case_ : List[Any] = False
for j in range(len(lowerCAmelCase_ ) ):
if temp[j] is True:
prime.append(j + low )
snake_case_ : int = high + 1
snake_case_ : Union[str, Any] = min(high + end , lowerCAmelCase_ )
return prime
print(sieve(1_0**6))
| 666 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = FlaxAutoencoderKL
@property
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : str = 4
snake_case_ : Any = 3
snake_case_ : List[Any] = (32, 32)
snake_case_ : Optional[int] = jax.random.PRNGKey(0 )
snake_case_ : List[str] = jax.random.uniform(A__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
snake_case_ : int = self.dummy_input
return init_dict, inputs_dict
| 666 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self : List[str] , A__ : List[Any] , A__ : int=7 , A__ : Union[str, Any]=3 , A__ : List[str]=30 , A__ : Optional[int]=4_00 , A__ : Optional[Any]=True , A__ : Optional[int]=None , A__ : Optional[Any]=True , A__ : Any=[0.5, 0.5, 0.5] , A__ : int=[0.5, 0.5, 0.5] , A__ : Any=True , A__ : int=1 / 2_55 , A__ : List[str]=True , ) -> Dict:
'''simple docstring'''
snake_case_ : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
snake_case_ : Any = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : List[Any] = max_resolution
snake_case_ : Tuple = do_resize
snake_case_ : Dict = size
snake_case_ : Optional[Any] = do_normalize
snake_case_ : int = image_mean
snake_case_ : List[Any] = image_std
snake_case_ : Tuple = do_rescale
snake_case_ : Any = rescale_factor
snake_case_ : Optional[int] = do_pad
def UpperCAmelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self : Optional[int] , A__ : Optional[int] , A__ : Any=False ) -> Optional[Any]:
'''simple docstring'''
if not batched:
snake_case_ : Any = image_inputs[0]
if isinstance(A__ , Image.Image ):
snake_case_ ,snake_case_ : Dict = image.size
else:
snake_case_ ,snake_case_ : int = image.shape[1], image.shape[2]
if w < h:
snake_case_ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case_ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : List[Any] = self.size["shortest_edge"]
else:
snake_case_ : str = []
for image in image_inputs:
snake_case_ ,snake_case_ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : List[Any] = max(A__ , key=lambda A__ : item[0] )[0]
snake_case_ : int = max(A__ , key=lambda A__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "image_mean" ) )
self.assertTrue(hasattr(A__ , "image_std" ) )
self.assertTrue(hasattr(A__ , "do_normalize" ) )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , A__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , A__ )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : int ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : Optional[Any] = json.loads(f.read() )
snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
# verify pixel values
snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A__ )
snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
# verify boxes
snake_case_ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
# verify is_crowd
snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
# verify class_labels
snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
# verify size
snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
@slow
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Any = json.loads(f.read() )
snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
# verify pixel values
snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A__ )
snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
# verify boxes
snake_case_ : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
# verify is_crowd
snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
# verify class_labels
snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
# verify masks
snake_case_ : Union[str, Any] = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
# verify orig_size
snake_case_ : Dict = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
# verify size
snake_case_ : str = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """Unit tests for XLMTokenizer over a tiny on-disk BPE vocab/merges fixture."""

    # Hooks consumed by the TokenizerTesterMixin base class.
    _SCREAMING_SNAKE_CASE : int = XLMTokenizer
    _SCREAMING_SNAKE_CASE : Optional[int] = False

    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """Write a minimal vocab.json and merges file into the test tmp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : str = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        # NOTE(review): successive locals were collapsed to `snake_case_`, so the
        # values written below depend on names (A__, self.vocab_file, ...) that
        # this mangled version never defines — confirm against the upstream test.
        snake_case_ : Any = dict(zip(A__ , range(len(A__ ) ) ) )
        snake_case_ : int = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(A__ ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(A__ ) )

    def UpperCAmelCase__ ( self : Optional[int] , A__ : int ) -> Any:
        """Return an (input_text, expected_output_text) pair for round-trip tests."""
        snake_case_ : Any = "lower newer"
        snake_case_ : Union[str, Any] = "lower newer"
        return input_text, output_text

    def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
        """Tokenize 'lower' with the fixture vocab and check tokens and ids."""
        snake_case_ : int = XLMTokenizer(self.vocab_file , self.merges_file )
        snake_case_ : List[str] = "lower"
        snake_case_ : List[Any] = ["low", "er</w>"]
        snake_case_ : Optional[int] = tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        snake_case_ : Any = tokens + ["<unk>"]
        snake_case_ : List[str] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )

    @slow
    def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
        """Check special-token layout ([0] ... [1]) produced by the real checkpoint."""
        snake_case_ : Optional[int] = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
        snake_case_ : Union[str, Any] = tokenizer.encode("sequence builders" , add_special_tokens=A__ )
        snake_case_ : str = tokenizer.encode("multi-sequence build" , add_special_tokens=A__ )
        snake_case_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(A__ )
        snake_case_ : int = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 666 | import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
    """Training/preprocessing arguments for the SQuAD question-answering task.

    NOTE(review): every field was renamed to the same `_SCREAMING_SNAKE_CASE`
    identifier, so in this mangled form later fields overwrite earlier ones;
    the metadata 'help' strings still describe the original parameters.
    """

    # model type (one of the auto-mapping's supported architectures)
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "Model type selected in the list: " + ", ".join(_UpperCamelCase )} )
    # directory containing the SQuAD .json files
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    # max total input sequence length after tokenization
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    # stride between document chunks
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    # max question length in tokens
    _SCREAMING_SNAKE_CASE : int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    # max generated answer length
    _SCREAMING_SNAKE_CASE : int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    # overwrite cached datasets
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    # SQuAD v2 mode (unanswerable questions allowed)
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    # null-prediction score threshold
    _SCREAMING_SNAKE_CASE : float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    # n-best size for answer selection
    _SCREAMING_SNAKE_CASE : int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    # language id for language-specific XLM models
    _SCREAMING_SNAKE_CASE : int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    # worker threads for example->feature conversion
    _SCREAMING_SNAKE_CASE : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class snake_case__ ( _UpperCamelCase ):
    """Dataset split selector: the two members name the train and dev splits."""

    _SCREAMING_SNAKE_CASE : Tuple = "train"
    _SCREAMING_SNAKE_CASE : Any = "dev"
class snake_case__ ( _UpperCamelCase ):
    """Torch Dataset of SQuAD features, with an on-disk, file-locked feature cache."""

    _SCREAMING_SNAKE_CASE : SquadDataTrainingArguments
    _SCREAMING_SNAKE_CASE : List[SquadFeatures]
    _SCREAMING_SNAKE_CASE : Split
    _SCREAMING_SNAKE_CASE : bool

    # NOTE(review): all parameters were renamed to `A__` (a SyntaxError as-is)
    # and all locals to `snake_case_`; body reads names (mode, args, start, ...)
    # that this mangled version never binds — verify against the upstream file.
    def __init__( self : str , A__ : SquadDataTrainingArguments , A__ : PreTrainedTokenizer , A__ : Optional[int] = None , A__ : Union[str, Split] = Split.train , A__ : Optional[bool] = False , A__ : Optional[str] = None , A__ : Optional[str] = "pt" , ) -> Optional[Any]:
        """Load features from the cache if present, else build and cache them."""
        snake_case_ : Tuple = args
        snake_case_ : int = is_language_sensitive
        snake_case_ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(A__ , A__ ):
            try:
                snake_case_ : List[str] = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        snake_case_ : Tuple = mode
        # Load data features from cache or dataset file
        snake_case_ : Dict = "v2" if args.version_2_with_negative else "v1"
        snake_case_ : List[Any] = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        snake_case_ : List[Any] = cached_features_file + ".lock"
        with FileLock(A__ ):
            if os.path.exists(A__ ) and not args.overwrite_cache:
                snake_case_ : int = time.time()
                snake_case_ : List[Any] = torch.load(A__ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                snake_case_ : Tuple = self.old_features["features"]
                snake_case_ : List[str] = self.old_features.get("dataset" , A__ )
                snake_case_ : Tuple = self.old_features.get("examples" , A__ )
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run" )
            else:
                if mode == Split.dev:
                    snake_case_ : Tuple = self.processor.get_dev_examples(args.data_dir )
                else:
                    snake_case_ : Tuple = self.processor.get_train_examples(args.data_dir )
                snake_case_ ,snake_case_ : Optional[Any] = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=A__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A__ , )
                snake_case_ : Any = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , A__ , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__( self : str ) -> Dict:
        """Number of cached features (== dataset length)."""
        return len(self.features )

    def __getitem__( self : Optional[int] , A__ : Optional[int] ) -> Dict[str, torch.Tensor]:
        """Convert feature i into the tensor dict expected by the model forward."""
        snake_case_ : Any = self.features[i]
        snake_case_ : Optional[int] = torch.tensor(feature.input_ids , dtype=torch.long )
        snake_case_ : Union[str, Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.cls_index , dtype=torch.long )
        snake_case_ : str = torch.tensor(feature.p_mask , dtype=torch.float )
        snake_case_ : str = torch.tensor(feature.is_impossible , dtype=torch.float )
        snake_case_ : Optional[int] = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        # Some architectures do not take token_type_ids; XLNet/XLM need extras.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible} )
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            snake_case_ : Any = torch.tensor(feature.start_position , dtype=torch.long )
            snake_case_ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
| 666 | 1 |
def binary_recursive(decimal: int) -> str:
    """Return the binary digits (no prefix, no sign) of a non-negative integer.

    Recursive helper: peels off the least-significant bit with ``divmod`` and
    prepends the representation of the quotient. Restored to the name the
    public converter below actually calls (the mangled version defined it
    under a clashing name, raising ``NameError`` at call time).
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def SCREAMING_SNAKE_CASE_(lowerCAmelCase_: str):
    """Convert a (possibly signed) integer string to a ``0b``-prefixed binary string.

    Raises:
        ValueError: if the input is empty or is not an integer literal.
    """
    number = str(lowerCAmelCase_).strip()
    if not number:
        raise ValueError("No input value was provided")
    # Keep the sign aside; binary_recursive handles magnitudes only.
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 666 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration for the GIT vision encoder (a CLIP-style ViT tower)."""

    _SCREAMING_SNAKE_CASE : Dict = "git_vision_model"

    # NOTE(review): all parameters were renamed to `A__` (duplicate argument
    # names) and the body reads the original names (hidden_size, ...) that are
    # no longer bound — the defaults shown are the original hyper-parameters.
    def __init__( self : int , A__ : Union[str, Any]=7_68 , A__ : List[Any]=30_72 , A__ : Tuple=12 , A__ : Optional[Any]=12 , A__ : Optional[int]=3 , A__ : List[str]=2_24 , A__ : Dict=16 , A__ : int="quick_gelu" , A__ : Any=1E-5 , A__ : Tuple=0.0 , A__ : Optional[int]=0.02 , **A__ : List[str] , ) -> Optional[int]:
        """Store the vision-tower hyper-parameters on the config instance."""
        super().__init__(**A__ )
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : str = intermediate_size
        snake_case_ : Optional[Any] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : Optional[int] = num_channels
        snake_case_ : Union[str, Any] = patch_size
        snake_case_ : List[str] = image_size
        snake_case_ : List[Any] = initializer_range
        snake_case_ : Any = attention_dropout
        snake_case_ : Any = layer_norm_eps
        snake_case_ : int = hidden_act

    @classmethod
    def UpperCAmelCase__ ( cls : List[Any] , A__ : Union[str, os.PathLike] , **A__ : Optional[int] ) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it when given a full GIT config."""
        cls._set_token_in_kwargs(A__ )
        snake_case_ ,snake_case_ : Tuple = cls.get_config_dict(A__ , **A__ )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            snake_case_ : Any = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(A__ , **A__ )
class snake_case__ ( _UpperCamelCase ):
    """Top-level GIT configuration: text-decoder hyper-parameters plus a nested vision config."""

    _SCREAMING_SNAKE_CASE : Optional[Any] = "git"

    # NOTE(review): parameters collapsed to `A__` (duplicate argument names);
    # the body reads the original parameter names, which are no longer bound.
    def __init__( self : Any , A__ : List[str]=None , A__ : List[str]=3_05_22 , A__ : Tuple=7_68 , A__ : Tuple=6 , A__ : str=12 , A__ : Any=30_72 , A__ : List[str]="gelu" , A__ : int=0.1 , A__ : Dict=0.1 , A__ : Any=10_24 , A__ : Optional[Any]=0.02 , A__ : Optional[Any]=1E-12 , A__ : Dict=0 , A__ : Any="absolute" , A__ : Tuple=True , A__ : Any=False , A__ : Tuple=1_01 , A__ : Tuple=1_02 , A__ : List[Any]=None , **A__ : List[str] , ) -> int:
        """Initialize the text side and build a default vision config when none is given."""
        super().__init__(bos_token_id=A__ , eos_token_id=A__ , pad_token_id=A__ , **A__ )
        if vision_config is None:
            snake_case_ : int = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        snake_case_ : str = GitVisionConfig(**A__ )
        snake_case_ : int = vocab_size
        snake_case_ : List[Any] = hidden_size
        snake_case_ : Tuple = num_hidden_layers
        snake_case_ : List[Any] = num_attention_heads
        snake_case_ : Any = hidden_act
        snake_case_ : Dict = intermediate_size
        snake_case_ : Any = hidden_dropout_prob
        snake_case_ : Any = attention_probs_dropout_prob
        snake_case_ : Union[str, Any] = max_position_embeddings
        snake_case_ : List[str] = initializer_range
        snake_case_ : List[str] = layer_norm_eps
        snake_case_ : Any = position_embedding_type
        snake_case_ : Union[str, Any] = use_cache
        snake_case_ : str = tie_word_embeddings
        snake_case_ : List[Any] = num_image_with_embedding
        snake_case_ : Dict = bos_token_id
        snake_case_ : int = eos_token_id

    def UpperCAmelCase__ ( self : Any ) -> int:
        """Serialize to a dict, expanding the nested vision config and adding model_type."""
        snake_case_ : Tuple = copy.deepcopy(self.__dict__ )
        snake_case_ : Optional[int] = self.vision_config.to_dict()
        snake_case_ : Tuple = self.__class__.model_type
        return output
| 666 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Dict ):
    """Derive a FocalNetConfig from a checkpoint name (tiny/small/base/large/xlarge/huge, srf/lrf, fl3/fl4).

    NOTE(review): every local below was collapsed to the single name
    `snake_case_`, so each assignment overwrites the previous one, and the body
    reads `model_name` although the parameter is `lowerCAmelCase_` — the
    derived depths/focal_levels/embed_dim never reach the config call in this
    mangled form. Confirm against the upstream conversion script.
    """
    # depths per stage: tiny uses [2,2,6,2], everything else [2,2,18,2]
    snake_case_ : List[Any] = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 1_8, 2]
    # conv patch embedding / post-layernorm / layerscale only for large+ models
    snake_case_ : List[str] = True if "large" in model_name or "huge" in model_name else False
    snake_case_ : Union[str, Any] = True if "large" in model_name or "huge" in model_name else False
    snake_case_ : List[Any] = True if "large" in model_name or "huge" in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            snake_case_ : int = [3, 3, 3, 3]
            snake_case_ : Dict = [5, 5, 5, 5]
        elif "fl4" in model_name:
            snake_case_ : Tuple = [4, 4, 4, 4]
            snake_case_ : Any = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        snake_case_ : List[Any] = [3, 3, 3, 3]
        if "lrf" in model_name:
            snake_case_ : Dict = [3, 3, 3, 3]
        else:
            snake_case_ : int = [2, 2, 2, 2]
    # embedding dimension scales with model size
    if "tiny" in model_name:
        snake_case_ : Union[str, Any] = 9_6
    elif "small" in model_name:
        snake_case_ : str = 9_6
    elif "base" in model_name:
        snake_case_ : Optional[int] = 1_2_8
    elif "large" in model_name:
        snake_case_ : List[str] = 1_9_2
    elif "xlarge" in model_name:
        snake_case_ : List[str] = 2_5_6
    elif "huge" in model_name:
        snake_case_ : Dict = 3_5_2
    # set label information
    snake_case_ : int = "huggingface/label-files"
    # large/huge checkpoints are ImageNet-22k, the rest ImageNet-1k
    if "large" in model_name or "huge" in model_name:
        snake_case_ : Dict = "imagenet-22k-id2label.json"
    else:
        snake_case_ : List[Any] = "imagenet-1k-id2label.json"
    snake_case_ : Any = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
    snake_case_ : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
    snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()}
    snake_case_ : Optional[Any] = FocalNetConfig(
        embed_dim=lowerCAmelCase_ , depths=lowerCAmelCase_ , focal_levels=lowerCAmelCase_ , focal_windows=lowerCAmelCase_ , use_conv_embed=lowerCAmelCase_ , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ , use_post_layernorm=lowerCAmelCase_ , use_layerscale=lowerCAmelCase_ , )
    return config
def SCREAMING_SNAKE_CASE_(lowerCAmelCase_: str):
    """Map an original FocalNet checkpoint key to its HF Transformers name.

    Bug fix: each ``str.replace`` result was previously assigned to a throwaway
    local and discarded, so every key was returned unchanged. The rewrites now
    feed into ``name`` so that later rules see the output of earlier ones.

    Returns:
        The converted parameter name; non-head keys get a ``focalnet.`` prefix.
    """
    name = lowerCAmelCase_
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    # Focal modulation sub-layers: f -> projection_in, h -> projection_context,
    # proj -> projection_out (order matters: f/h before the generic proj rule).
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: str , lowerCAmelCase_: Any , lowerCAmelCase_: str=False ):
    """Download an original FocalNet checkpoint, convert it to HF format, verify it, and optionally save/push.

    NOTE(review): this mangled version has duplicate parameter names (a
    SyntaxError) and all locals collapsed to `snake_case_`/`lowerCAmelCase_`;
    the body reads the original names (model_name, state_dict, model, ...)
    that are never bound here — compare with the upstream conversion script.
    """
    # fmt: off
    # checkpoint name -> original weights URL
    snake_case_ : Optional[int] = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    snake_case_ : List[Any] = model_name_to_url[model_name]
    print("Checkpoint URL: " , lowerCAmelCase_ )
    snake_case_ : int = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        snake_case_ : Union[str, Any] = state_dict.pop(lowerCAmelCase_ )
        snake_case_ : Optional[Any] = val
    snake_case_ : Dict = get_focalnet_config(lowerCAmelCase_ )
    snake_case_ : List[Any] = FocalNetForImageClassification(lowerCAmelCase_ )
    model.eval()
    # load state dict
    model.load_state_dict(lowerCAmelCase_ )
    # verify conversion
    snake_case_ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    snake_case_ : Tuple = BitImageProcessor(
        do_resize=lowerCAmelCase_ , size={"shortest_edge": 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCAmelCase_ , crop_size=2_2_4 , do_normalize=lowerCAmelCase_ , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_ , )
    snake_case_ : Union[str, Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
    snake_case_ : List[str] = processor(images=lowerCAmelCase_ , return_tensors="pt" )
    # reference torchvision pipeline used to cross-check the HF processor
    snake_case_ : List[str] = transforms.Compose(
        [
            transforms.Resize(2_5_6 ),
            transforms.CenterCrop(2_2_4 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    snake_case_ : Union[str, Any] = image_transforms(lowerCAmelCase_ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , lowerCAmelCase_ , atol=1e-4 )
    snake_case_ : List[Any] = model(**lowerCAmelCase_ )
    snake_case_ : Optional[int] = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # expected logits per checkpoint, used to validate the conversion
    if model_name == "focalnet-tiny":
        snake_case_ : Any = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        snake_case_ : Union[str, Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        snake_case_ : Dict = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        snake_case_ : Tuple = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        snake_case_ : Optional[Any] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        snake_case_ : Optional[Any] = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(lowerCAmelCase_ )
        processor.save_pretrained(lowerCAmelCase_ )
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub..." )
        model.push_to_hub(f"{model_name}" )
        processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser is assigned to `UpperCAmelCase` but used as
    # `parser`/`args`, and the converter is defined under a mangled name —
    # these references do not resolve in this form.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    UpperCAmelCase = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
def jaro_winkler(stra: str, strb: str) -> float:
    """Jaro-Winkler similarity between two strings (1.0 means identical).

    Bug fixes: the previous version declared duplicate parameter names (a
    SyntaxError), compared each matched character with itself so transpositions
    were always zero, and was defined under a name different from the one the
    ``__main__`` block calls.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that match within the Jaro window of _strb.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot match twice.
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition: matched characters that appear in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Force deterministic torch/cudnn behavior so the pixel-slice assertions below are reproducible.
enable_full_determinism()
class snake_case__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Fast CPU tests for StableUnCLIPImgaImgPipeline built from tiny dummy components."""

    # Pipeline-tester hooks: pipeline class and its (batch) parameter sets.
    _SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline
    _SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _SCREAMING_SNAKE_CASE : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _SCREAMING_SNAKE_CASE : List[str] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _SCREAMING_SNAKE_CASE : Optional[Any] = frozenset([] )

    def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
        """Build the full dict of tiny pipeline components used by the fast tests.

        NOTE(review): locals collapsed to `snake_case_`; reads such as
        `embedder_hidden_size` and the names in the returned dict are unbound
        in this mangled form — verify against the upstream test file.
        """
        snake_case_ : Union[str, Any] = 32
        snake_case_ : str = embedder_hidden_size
        # image encoding components
        snake_case_ : Tuple = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=A__ , projection_dim=A__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        snake_case_ : int = StableUnCLIPImageNormalizer(embedding_dim=A__ )
        snake_case_ : Union[str, Any] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        snake_case_ : Dict = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=A__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        snake_case_ : Tuple = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A__ , layers_per_block=1 , upcast_attention=A__ , use_linear_projection=A__ , )
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=A__ , steps_offset=1 , )
        torch.manual_seed(0 )
        snake_case_ : Any = AutoencoderKL()
        snake_case_ : Optional[Any] = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components

    def UpperCAmelCase__ ( self : Any , A__ : Optional[int] , A__ : Tuple=0 , A__ : str=True ) -> Union[str, Any]:
        """Build deterministic pipeline call kwargs (optionally converting the image to PIL)."""
        if str(A__ ).startswith("mps" ):
            snake_case_ : List[Any] = torch.manual_seed(A__ )
        else:
            snake_case_ : str = torch.Generator(device=A__ ).manual_seed(A__ )
        snake_case_ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
        if pil_image:
            # Rescale from [-1, 1] to [0, 1] before converting to PIL.
            snake_case_ : Optional[int] = input_image * 0.5 + 0.5
            snake_case_ : Optional[Any] = input_image.clamp(0 , 1 )
            snake_case_ : List[str] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            snake_case_ : Any = DiffusionPipeline.numpy_to_pil(A__ )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def UpperCAmelCase__ ( self : str ) -> Tuple:
        """End-to-end fast run on CPU, comparing an output pixel slice to a reference."""
        snake_case_ : Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : str = self.get_dummy_components()
        snake_case_ : str = StableUnCLIPImgaImgPipeline(**A__ )
        snake_case_ : Tuple = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        snake_case_ : Optional[Any] = self.get_dummy_inputs(A__ )
        inputs.update({"image_embeds": None} )
        snake_case_ : Tuple = sd_pipe(**A__ ).images
        snake_case_ : Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case_ : Optional[Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def UpperCAmelCase__ ( self : int ) -> Dict:
        """Run the shared attention-slicing check (exact diffs only on cpu/mps)."""
        snake_case_ : Any = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=A__ )

    def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
        """Run the shared batch-vs-single consistency check."""
        snake_case_ : Any = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=A__ )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def UpperCAmelCase__ ( self : Dict ) -> Dict:
        """Run the shared xFormers attention check (CUDA + xformers only)."""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=A__ )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIP img2img against reference outputs."""

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
        """Free CUDA memory between tests to avoid OOM on shared runners."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
        """Compare the 2-1-l checkpoint's output image to a stored reference array."""
        snake_case_ : int = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        snake_case_ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
        snake_case_ : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        snake_case_ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        snake_case_ : int = pipe(A__ , "anime turle" , generator=A__ , output_type="np" )
        snake_case_ : Dict = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(A__ , A__ )

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
        """Compare the 2-1-h checkpoint's output image to a stored reference array."""
        snake_case_ : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        snake_case_ : List[str] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
        snake_case_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        snake_case_ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
        snake_case_ : Optional[int] = pipe(A__ , "anime turle" , generator=A__ , output_type="np" )
        snake_case_ : Optional[Any] = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(A__ , A__ )

    def UpperCAmelCase__ ( self : Any ) -> str:
        """Check peak GPU memory stays under 7 GB with slicing + cpu offload enabled."""
        snake_case_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case_ : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
        snake_case_ : Any = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        snake_case_ : List[str] = pipe(
            A__ , "anime turtle" , num_inference_steps=2 , output_type="np" , )
        snake_case_ : Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 666 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

# Mapping from suno/bark layer-name fragments to their HF Bark equivalents,
# applied to every checkpoint key during conversion.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub locations of the original suno/bark checkpoints (large and "_small" variants).
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# Matches bark's own cache layout so already-downloaded checkpoints are reused.
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def SCREAMING_SNAKE_CASE_ ( model_type: str , use_small: bool = False ):
    """Return the local cache path of the checkpoint for ``model_type``.

    Args:
        model_type: one of "text", "coarse" or "fine".
        use_small: if True, resolve the small checkpoint variant.
    """
    # Small variants are keyed as "<type>_small" in REMOTE_MODEL_PATHS.
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["file_name"] )
def SCREAMING_SNAKE_CASE_ ( from_hf_hub: str , file_name: str ):
    """Download ``file_name`` from the given Hub repo into the local Bark cache dir."""
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def SCREAMING_SNAKE_CASE_ ( ckpt_path , device , use_small=False , model_type="text" ):
    """Load a suno/bark sub-model checkpoint and convert it into an HF Bark model.

    Args:
        ckpt_path: local path of the original checkpoint (downloaded if missing).
        device: torch map_location / target device for the converted model.
        use_small: whether this is the small checkpoint variant.
        model_type: "text", "coarse" or "fine".

    Returns:
        The converted HF model in eval mode, moved to ``device``.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." )
        _download(model_info["repo_id"] , model_info["file_name"] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack: rename the vocab-size argument to the HF input/output pair
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head" )
    model_args["hidden_size"] = model_args.pop("n_embd" )
    model_args["num_layers"] = model_args.pop("n_layer" )
    model_config = ConfigClass(**checkpoint["model_args"] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile prefix and remap layer names
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(extra_keys ) != 0:
        raise ValueError(f"extra keys found: {extra_keys}" )
    if len(missing_keys ) != 0:
        raise ValueError(f"missing keys: {missing_keys}" )
    # strict=False because the ".attn.bias" buffers are intentionally absent
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def SCREAMING_SNAKE_CASE_ ( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    """Convert one Bark sub-model, sanity-check it against the original, and save it.

    Args:
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        use_small: convert the small checkpoint variant.
        model_type: "text", "coarse" or "fine".

    Raises:
        ValueError: if the converted model's parameter count or outputs diverge
            from the original suno/bark model.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , "cpu" , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def SCREAMING_SNAKE_CASE_ ( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    """Assemble a full BarkModel from three converted sub-models plus Encodec, then save/push it.

    Args:
        semantic_path / coarse_path / fine_path: folders with the converted sub-models.
        append_text: subfolder name appended to ``folder_path`` for the output.
        hub_path: Hub repo id used when pushing.
        folder_path: parent output directory.
    """
    model_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , "config.json" ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , "config.json" ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , "config.json" ) )
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    # wire the sub-models into the composite model
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(model_path ).mkdir(exist_ok=True )
    bark.save_pretrained(model_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    # CLI entry point: convert a single Bark sub-model checkpoint to HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
    # NOTE(review): `load_model` should be the single-sub-model converter defined above;
    # the converter's name was mangled in this file -- confirm the intended entry point.
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 1 |
import functools


def SCREAMING_SNAKE_CASE_ ( days: list[int] , costs: list[int] ):
    """Minimum total cost of travel passes covering every travel day (days 1..365).

    Args:
        days: days of the year on which travel happens.
        costs: prices of a 1-day, 7-day and 30-day pass, in that order.

    Returns:
        The minimum total cost; 0 for an empty ``days`` list.

    Raises:
        ValueError: if ``days`` is not a list of ints in (0, 366), or ``costs``
            is not a list of exactly three ints.
    """
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index: int ) -> int:
        # Cheapest cost of covering every travel day from `index` onward.
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import scaffold: maps submodule name -> the public names it exports.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: also expose the modeling classes.
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Citation, description and usage text consumed by the Metric class below.
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def SCREAMING_SNAKE_CASE_ ( preds , labels ):
    """Return the fraction of ``preds`` equal to ``labels`` as a Python float.

    Both inputs are expected to support elementwise ``==`` and ``.mean()``
    (e.g. numpy arrays of the same shape).
    """
    return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE_ ( preds , labels ):
    """Return both accuracy and F1 score for binary predictions.

    Returns:
        dict with keys "accuracy" and "f1".
    """
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def SCREAMING_SNAKE_CASE_ ( preds , labels ):
    """Return Pearson and Spearman correlations between ``preds`` and ``labels``.

    Returns:
        dict with keys "pearson" and "spearmanr".
    """
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
    """GLUE metric: dispatches to accuracy / F1 / correlations based on ``config_name``.

    NOTE(review): in this dump both method parameters were mangled to the single
    name `A__`; in the original they are `predictions` and `references`.
    """

    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        """Validate ``config_name`` and return the MetricInfo (stsb uses float inputs)."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
                "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
                } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )

    def UpperCAmelCase__ ( self : Optional[int] , A__ : List[str] , A__ : List[Any] ) -> Tuple:
        """Compute the metric(s) matching ``config_name`` for the given predictions/references."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(A__ , A__ )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(A__ , A__ )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(A__ , A__ )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(A__ , A__ )}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
                "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 666 | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
    """Image processor: resize -> center-crop -> rescale -> normalize, plus
    semantic-segmentation post-processing.

    NOTE(review): parameter names in every signature were mangled to a single
    `A__`, while the bodies reference the conventional names (size, crop_size,
    do_resize, ...); confirm against the original HF image-processor file.
    """

    # Keys the processor produces in its BatchFeature output.
    _SCREAMING_SNAKE_CASE : str = ["pixel_values"]

    def __init__( self : List[Any] , A__ : bool = True , A__ : Optional[Dict[str, int]] = None , A__ : PILImageResampling = PILImageResampling.BILINEAR , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , **A__ : int , ) -> None:
        """Store preprocessing defaults (shortest-edge 256 resize, 224x224 crop,
        1/255 rescale, ImageNet mean/std normalization)."""
        super().__init__(**A__ )
        snake_case_ : Optional[int] = size if size is not None else {"shortest_edge": 2_56}
        snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        snake_case_ : Any = get_size_dict(A__ , param_name="crop_size" )
        snake_case_ : int = do_resize
        snake_case_ : Optional[Any] = size
        snake_case_ : Optional[Any] = resample
        snake_case_ : Optional[int] = do_center_crop
        snake_case_ : List[Any] = crop_size
        snake_case_ : List[Any] = do_rescale
        snake_case_ : Optional[int] = rescale_factor
        snake_case_ : Optional[Any] = do_normalize
        snake_case_ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : str , ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``, keeping aspect ratio."""
        snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        snake_case_ : Any = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
        return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : int , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Union[str, Any] , ) -> np.ndarray:
        """Center-crop to ``size["height"]`` x ``size["width"]``."""
        snake_case_ : Tuple = get_size_dict(A__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : float , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Tuple ) -> np.ndarray:
        """Rescale pixel values by the given scale factor (typically 1/255)."""
        return rescale(A__ , scale=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Dict , ) -> np.ndarray:
        """Normalize with per-channel mean and std."""
        return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , A__ : ImageInput , A__ : Optional[bool] = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : Dict[str, int] = None , A__ : Optional[bool] = None , A__ : Optional[float] = None , A__ : Optional[bool] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A__ : Union[str, Any] , ) -> Optional[int]:
        """Run the full preprocessing pipeline over one image or a batch.

        Per-call arguments override the defaults stored in __init__; the result
        is a BatchFeature holding "pixel_values".
        """
        snake_case_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Dict = size if size is not None else self.size
        snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : Tuple = resample if resample is not None else self.resample
        snake_case_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : str = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" )
        snake_case_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : List[str] = image_std if image_std is not None else self.image_std
        snake_case_ : Dict = make_list_of_images(A__ )
        if not valid_images(A__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        snake_case_ : Tuple = [to_numpy_array(A__ ) for image in images]
        if do_resize:
            snake_case_ : Any = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
        if do_center_crop:
            snake_case_ : List[str] = [self.center_crop(image=A__ , size=A__ ) for image in images]
        if do_rescale:
            snake_case_ : Any = [self.rescale(image=A__ , scale=A__ ) for image in images]
        if do_normalize:
            snake_case_ : Union[str, Any] = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
        snake_case_ : Optional[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
        snake_case_ : Any = {"pixel_values": images}
        return BatchFeature(data=A__ , tensor_type=A__ )

    def UpperCAmelCase__ ( self : List[str] , A__ : Dict , A__ : List[Tuple] = None ) -> Union[str, Any]:
        """Convert model logits to per-image semantic segmentation maps.

        If ``target_sizes`` is given, each logit map is bilinearly resized to its
        target size before the per-pixel argmax; otherwise argmax is taken over
        the raw logits batch.
        """
        snake_case_ : Tuple = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A__ ) != len(A__ ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(A__ ):
                snake_case_ : Dict = target_sizes.numpy()
            snake_case_ : int = []
            for idx in range(len(A__ ) ):
                snake_case_ : List[str] = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A__ )
                snake_case_ : int = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A__ )
        else:
            snake_case_ : List[Any] = logits.argmax(dim=1 )
            snake_case_ : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 666 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import scaffold: maps submodule name -> the public names it exports.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is installed: also expose the modeling classes.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import scaffold: maps submodule name -> the public names it exports.
# Optional-backend entries are ADDED to this dict below, never overwrite it.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
    """Free GPU memory between tests."""
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
    """Deterministic random 1x3x32x32 image tensor for the tests.

    NOTE(review): the locals below were all mangled to `snake_case_` and the
    return references `image`/`A__`, which are undefined in this dump; in the
    original these are batch_size, num_channels, sizes and the created tensor.
    """
    snake_case_ : Any = 1
    snake_case_ : Any = 3
    snake_case_ : Any = (32, 32)
    snake_case_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A__ )
    return image
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
    """Small conditional UNet configured for the upscale pipeline (seeded for determinism)."""
    torch.manual_seed(0 )
    snake_case_ : Optional[int] = UNetaDConditionModel(
        block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A__ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
    return model
@property
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
    """Small AutoencoderKL VAE for the tests (seeded for determinism)."""
    torch.manual_seed(0 )
    snake_case_ : Tuple = AutoencoderKL(
        block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
    return model
@property
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
    """Tiny CLIP text encoder for the tests (seeded for determinism)."""
    torch.manual_seed(0 )
    snake_case_ : List[str] = CLIPTextConfig(
        bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
    return CLIPTextModel(A__ )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Any = self.dummy_cond_unet_upscale
snake_case_ : str = DDPMScheduler()
snake_case_ : Union[str, Any] = DDIMScheduler(prediction_type="v_prediction" )
snake_case_ : str = self.dummy_vae
snake_case_ : str = self.dummy_text_encoder
snake_case_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case_ : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : Any = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case_ : str = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=3_50 , )
snake_case_ : Union[str, Any] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case_ : Tuple = "A painting of a squirrel eating a burger"
snake_case_ : List[Any] = torch.Generator(device=A__ ).manual_seed(0 )
snake_case_ : Any = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
snake_case_ : int = output.images
snake_case_ : List[str] = torch.Generator(device=A__ ).manual_seed(0 )
snake_case_ : List[str] = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=A__ , )[0]
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
snake_case_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
snake_case_ : Any = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case_ : Optional[int] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : int = self.dummy_cond_unet_upscale
snake_case_ : int = DDPMScheduler()
snake_case_ : Dict = DDIMScheduler(prediction_type="v_prediction" )
snake_case_ : Any = self.dummy_vae
snake_case_ : Optional[int] = self.dummy_text_encoder
snake_case_ : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case_ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : List[Any] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
snake_case_ : int = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=3_50 , )
snake_case_ : List[str] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case_ : Union[str, Any] = "A painting of a squirrel eating a burger"
snake_case_ : Tuple = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
snake_case_ : Optional[int] = output.images
assert image.shape[0] == 2
snake_case_ : Tuple = torch.Generator(device=A__ ).manual_seed(0 )
snake_case_ : List[str] = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
snake_case_ : Optional[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = self.dummy_cond_unet_upscale
snake_case_ : int = DDPMScheduler()
snake_case_ : Union[str, Any] = DDIMScheduler(prediction_type="v_prediction" )
snake_case_ : Dict = self.dummy_vae
snake_case_ : Tuple = self.dummy_text_encoder
snake_case_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case_ : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ : List[str] = Image.fromarray(np.uinta(A__ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
snake_case_ : Union[str, Any] = unet.half()
snake_case_ : Optional[int] = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case_ : Optional[int] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=3_50 , )
snake_case_ : Union[str, Any] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
snake_case_ : Any = "A painting of a squirrel eating a burger"
snake_case_ : str = torch.manual_seed(0 )
snake_case_ : Dict = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_inference_steps=2 , output_type="np" , ).images
snake_case_ : Tuple = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
    """Slow GPU integration tests against stabilityai/stable-diffusion-x4-upscaler.

    NOTE(review): mangled like the class above — locals are assigned to the
    throwaway ``snake_case_`` but read back via ``pipe``, ``output``,
    ``expected_image`` and ``mem_bytes``, which are never bound, so the
    methods cannot run as written.
    """

    def UpperCAmelCase__ ( self : Tuple ) -> int:
        """Per-test teardown: force a GC pass and release cached CUDA memory."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
        """Full-precision upscale of a reference cat image vs a stored .npy result."""
        snake_case_ : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        snake_case_ : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy" )
        snake_case_ : int = "stabilityai/stable-diffusion-x4-upscaler"
        snake_case_ : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(A__ )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing()
        snake_case_ : Dict = "a cat sitting on a park bench"
        snake_case_ : Dict = torch.manual_seed(0 )
        snake_case_ : Tuple = pipe(
            prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
        snake_case_ : Union[str, Any] = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        """fp16 upscale vs a stored fp16 reference (looser 5e-1 tolerance)."""
        snake_case_ : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        snake_case_ : Dict = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy" )
        snake_case_ : Union[str, Any] = "stabilityai/stable-diffusion-x4-upscaler"
        snake_case_ : str = StableDiffusionUpscalePipeline.from_pretrained(
            A__ , torch_dtype=torch.floataa , )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing()
        snake_case_ : str = "a cat sitting on a park bench"
        snake_case_ : Tuple = torch.manual_seed(0 )
        snake_case_ : Optional[int] = pipe(
            prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
        snake_case_ : Optional[int] = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
        """Memory test: attention slicing + sequential CPU offload should keep
        peak CUDA allocation below ~2.9 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        snake_case_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png" )
        snake_case_ : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
        snake_case_ : int = StableDiffusionUpscalePipeline.from_pretrained(
            A__ , torch_dtype=torch.floataa , )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        snake_case_ : int = "a cat sitting on a park bench"
        snake_case_ : Optional[int] = torch.manual_seed(0 )
        snake_case_ : Dict = pipe(
            prompt=A__ , image=A__ , generator=A__ , num_inference_steps=5 , output_type="np" , )
        snake_case_ : List[str] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 666 | from ...configuration_utils import PretrainedConfig
UpperCAmelCase = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration class for the TAPAS table question-answering model.

    Holds the BERT-style encoder hyperparameters plus the table-QA
    fine-tuning hyperparameters (cell selection, aggregation, answer loss).

    Fixes in this revision:
    - the original ``__init__`` named every parameter ``A__`` (a duplicate
      argument name is a SyntaxError in Python); parameter names restored
      from the attribute assignments in the body,
    - attributes were assigned to a throwaway local ``snake_case_`` instead
      of ``self``; restored to ``self.<name> = <name>``,
    - the class-level ``Dict`` annotation was evaluated at class creation
      time but ``Dict`` is not imported here; the annotation is dropped.
    """

    # model_type identifier used by the auto-config machinery
    _SCREAMING_SNAKE_CASE = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        """Store all hyperparameters on the instance; extra kwargs go to the base config."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        # JSON serialization stringifies integer dict keys; convert them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 666 | 1 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from *position* that stay on an n x n board."""
    # Fix: the original never unpacked `position` into y/x (NameError) and was
    # defined under a mangled name while call sites use `get_valid_pos`.
    y, x = position
    candidates = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in candidates:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square of *board* has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend the tour from *pos* (move number *curr*).

    Mutates *board* in place; leaves it unchanged when no extension exists.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            # Fix: the original assigned the move number (and the backtracking
            # reset) to a throwaway local instead of board[y][x].
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns a board whose cells hold the move number (1-based) of each visit.
    Raises ValueError when no open tour exists for this board size.
    """
    board = [[0 for _ in range(n)] for _ in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1  # start the tour at (i, j)
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0  # undo the starting square before the next attempt
    # Fix: "Kight" typo in the original error message.
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class snake_case__ ( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder producing flat ``{"content": <str>}`` examples.

    NOTE(review): all three methods share the mangled name ``UpperCAmelCase__``
    (only the last binding survives — originally _info / _split_generators /
    _build_pcollection), and ``pipeline`` in the last method is an unbound
    name (it was a parameter before mangling).
    """

    def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
        """Dataset info: a single string feature named "content"."""
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=A__ , )

    def UpperCAmelCase__ ( self : Optional[Any] , A__ : str , A__ : str ) -> Optional[int]:
        """A single TRAIN split generator fed by get_test_dummy_examples()."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]

    def UpperCAmelCase__ ( self : int , A__ : Optional[int] , A__ : Dict ) -> Optional[Any]:
        """Build the Beam PCollection from the provided examples."""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
class snake_case__ ( datasets.BeamBasedBuilder ):
    """Beam-based builder producing nested ``{"a": {"b": [<str>]}}`` examples.

    NOTE(review): same mangling as the flat builder above — shared method
    name ``UpperCAmelCase__`` and the unbound ``pipeline`` reference.
    """

    def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
        """Dataset info: a sequence feature "a" containing string field "b"."""
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=A__ , )

    def UpperCAmelCase__ ( self : Any , A__ : List[str] , A__ : str ) -> Optional[int]:
        """A single TRAIN split generator fed by get_test_nested_examples()."""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def UpperCAmelCase__ ( self : List[Any] , A__ : List[str] , A__ : Optional[int] ) -> List[str]:
        """Build the Beam PCollection from the provided examples."""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
def get_test_dummy_examples():
    """Return (index, example) pairs with a flat "content" field for the dummy builder.

    Fix: this helper was defined under the mangled name ``SCREAMING_SNAKE_CASE_``
    while every call site in this file uses ``get_test_dummy_examples`` (NameError).
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return (index, example) pairs with a nested "a"/"b" structure for the nested builder.

    Fix: this helper was defined under the mangled name ``SCREAMING_SNAKE_CASE_``
    while every call site in this file uses ``get_test_nested_examples`` (NameError).
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class snake_case__ ( _UpperCamelCase ):
    """Tests for Beam-based dataset builders run with the DirectRunner,
    checking prepared arrow files, dataset_info.json, features and rows.

    NOTE(review): mangled — locals are bound to the throwaway ``snake_case_``
    but read back through never-defined names (``builder``, ``dset``,
    ``expected_num_examples``), so the methods cannot run as written.
    """

    @require_beam
    def UpperCAmelCase__ ( self : str ) -> List[str]:
        """Prepare the flat dummy dataset and verify files, features and rows."""
        snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : Dict = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            snake_case_ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    @require_beam
    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """Prepare with num_shards=2 by patching Beam's WriteToParquet."""
        import apache_beam as beam
        snake_case_ : Tuple = beam.io.parquetio.WriteToParquet
        snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : List[Any] = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                snake_case_ : int = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
            # NOTE(review): both assertions below check shard 00000 — the
            # second was presumably meant to check ...-00001-of-00002.arrow.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            snake_case_ : Optional[Any] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset

    @require_beam
    def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
        """Preparing without a beam_runner must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : Tuple = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
        """Prepare and read back the nested dummy dataset."""
        snake_case_ : Optional[int] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : List[str] = NestedBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            snake_case_ : int = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 666 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
UpperCAmelCase = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
class snake_case__ ( _UpperCamelCase ):
    """Fast (tokenizers-backed) BART tokenizer.

    NOTE(review): mangled — ``__init__`` names every parameter ``A__``
    (duplicate argument names are a SyntaxError), methods share the name
    ``UpperCAmelCase__`` so ``@mask_token.setter`` decorates an undefined
    name, and several locals (``pre_tok_state``, ``state``,
    ``changes_to_apply`` ...) are read without ever being bound.
    """

    # Class-level resources: vocab file names/URLs, max model input sizes,
    # model input names, and the slow-tokenizer counterpart class.
    _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
    _SCREAMING_SNAKE_CASE : List[Any] = BartTokenizer

    def __init__( self : Optional[int] , A__ : Union[str, Any]=None , A__ : str=None , A__ : Dict=None , A__ : Any="replace" , A__ : Optional[int]="<s>" , A__ : Union[str, Any]="</s>" , A__ : List[Any]="</s>" , A__ : List[str]="<s>" , A__ : List[str]="<unk>" , A__ : Any="<pad>" , A__ : List[Any]="<mask>" , A__ : int=False , A__ : Union[str, Any]=True , **A__ : Tuple , ) -> str:
        """Build the fast tokenizer, then sync add_prefix_space/trim_offsets
        into the backend pre-tokenizer and post-processor state."""
        super().__init__(
            A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
        snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , A__ ) != add_prefix_space:
            snake_case_ : Dict = getattr(A__ , pre_tok_state.pop("type" ) )
            snake_case_ : int = add_prefix_space
            snake_case_ : List[str] = pre_tok_class(**A__ )
        snake_case_ : Any = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        snake_case_ : List[str] = "post_processor"
        snake_case_ : Any = getattr(self.backend_tokenizer , A__ , A__ )
        if tokenizer_component_instance:
            snake_case_ : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case_ : str = tuple(state["sep"] )
            if "cls" in state:
                snake_case_ : List[str] = tuple(state["cls"] )
            snake_case_ : List[Any] = False
            if state.get("add_prefix_space" , A__ ) != add_prefix_space:
                snake_case_ : List[Any] = add_prefix_space
                snake_case_ : str = True
            if state.get("trim_offsets" , A__ ) != trim_offsets:
                snake_case_ : Tuple = trim_offsets
                snake_case_ : Optional[int] = True
            if changes_to_apply:
                snake_case_ : int = getattr(A__ , state.pop("type" ) )
                snake_case_ : List[Any] = component_class(**A__ )
                setattr(self.backend_tokenizer , A__ , A__ )

    @property
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
        """Return the mask token string, or None (with an error log) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def UpperCAmelCase__ ( self : Optional[int] , A__ : List[str] ) -> int:
        """Set the mask token, wrapping plain strings in a lstrip-ing AddedToken."""
        snake_case_ : Dict = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
        snake_case_ : int = value

    def UpperCAmelCase__ ( self : List[Any] , *A__ : Any , **A__ : List[Any] ) -> BatchEncoding:
        """Batch encode; pretokenized input requires add_prefix_space=True."""
        snake_case_ : int = kwargs.get("is_split_into_words" , A__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*A__ , **A__ )

    def UpperCAmelCase__ ( self : Optional[Any] , *A__ : int , **A__ : int ) -> BatchEncoding:
        """Single-sequence encode; pretokenized input requires add_prefix_space=True."""
        snake_case_ : Dict = kwargs.get("is_split_into_words" , A__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*A__ , **A__ )

    def UpperCAmelCase__ ( self : Optional[int] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        snake_case_ : List[Any] = self._tokenizer.model.save(A__ , name=A__ )
        return tuple(A__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , A__ : Dict , A__ : str=None ) -> Union[str, Any]:
        """Build BART inputs: <s> A </s> or <s> A </s></s> B </s>."""
        snake_case_ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def UpperCAmelCase__ ( self : Optional[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
        """Token type ids are all zeros for BART (it does not use them)."""
        snake_case_ : int = [self.sep_token_id]
        snake_case_ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 666 | import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning dedup set so each test re-emits the warning.

    Fix: the original named its parameter ``lowerCAmelCase_`` while the body
    used ``monkeypatch`` (NameError), and both fixtures plus the test shared
    one mangled name so pytest could never collect them.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the huggingface_hub module used by datasets.inspect with a stub
    that lists a fixed set of metric ids."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        # Fixed metric listing returned to datasets.inspect.
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metric entry point must emit the FutureWarning pointing to evaluate.

    Fix: the original test had five parameters all named ``lowerCAmelCase_``
    (duplicate argument names are a SyntaxError) and passed that mangled name
    to pytest.warns where the warning category ``FutureWarning`` belongs.
    """
    if "tmp_path" in args:
        # Substitute the placeholder string with the real tmp_path fixture value.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666 | 1 |
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number with *n* decimal digits
    (Project Euler problem 25).

    The sequence is taken as F1 = 1, F2 = 1, so the first 3-digit term is
    F12 = 144 and ``solution(3) == 12``.

    Fixes: the original counted the digits of the *parameter* instead of the
    current Fibonacci value, never bound its working locals (they were all
    assigned to a throwaway ``snake_case_``), placed the termination check at
    the wrong nesting level, and was defined under a mangled name while the
    __main__ guard calls ``solution`` (NameError).
    """
    fib_prev, fib_curr = 1, 1
    index = 2
    while True:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
        # Digit counts are nondecreasing, so equality is reached exactly once.
        if len(str(fib_curr)) == n:
            return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 666 | from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost index at which *item* can be inserted into ascending *sorted_collection*.

    Fixes applied across this suite: all seven functions were defined under
    the single mangled name ``SCREAMING_SNAKE_CASE_`` (so only the last
    survived and the __main__ guard's ``binary_search`` call failed), the
    insort helpers had four parameters all named ``lowerCAmelCase_``
    (duplicate argument names are a SyntaxError), and working locals were
    assigned to a throwaway name instead of ``hi``/``lo``/``mid``.
    """
    if hi < 0:
        hi = len(sorted_collection)  # -1 sentinel means "search to the end"
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost index at which *item* can be inserted into ascending *sorted_collection*."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # <= (vs < in bisect_left) skips past equal elements.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* before any existing equal elements, keeping the list sorted."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* after any existing equal elements, keeping the list sorted."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; return an index of *item*, or None when absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        if item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right+1]."""
    if right < left:
        return None  # empty range: item not present
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    if sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
UpperCAmelCase = input("Enter numbers separated by comma:\n").strip()
UpperCAmelCase = sorted(int(item) for item in user_input.split(","))
UpperCAmelCase = int(input("Enter a single number to be found in the list:\n"))
UpperCAmelCase = binary_search(collection, target)
if result is None:
print(F"{target} was not found in {collection}.")
else:
print(F"{target} was found at position {result} in {collection}.")
| 666 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration class for the TimeSformer video transformer.

    Fixes in this revision:
    - the original ``__init__`` named every parameter ``A__`` (a duplicate
      argument name is a SyntaxError); parameter names restored from the
      attribute assignments in the body,
    - attributes were assigned to a throwaway local ``snake_case_`` instead
      of ``self``; restored to ``self.<name> = <name>``,
    - the class-level ``Union[str, Any]`` annotation was evaluated at class
      creation time but ``Union`` is not imported here; annotation dropped.
    """

    # model_type identifier used by the auto-config machinery
    _SCREAMING_SNAKE_CASE = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        """Store all hyperparameters on the instance; extra kwargs go to the base config."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 666 | import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( _UpperCamelCase ):
    """Unconditional latent-diffusion pipeline.

    A UNet iteratively denoises random latents under a DDIM-style scheduler,
    and a VQ-VAE decodes the final latents into images.

    The original constructor and ``__call__`` repeated the parameter name
    ``A__`` (a SyntaxError) and bound intermediate results to throwaway
    locals; parameter and local names are restored from the identifiers the
    body read.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        # register_modules exposes the components as self.vqvae / self.unet /
        # self.scheduler and wires them into save/load.
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample ``batch_size`` images and return them.

        :param batch_size: number of images to generate
        :param generator: optional torch RNG(s) for reproducible noise
        :param eta: DDIM eta, forwarded only if the scheduler accepts it
        :param num_inference_steps: number of denoising steps
        :param output_type: "pil" for PIL images, anything else for ndarray
        :param return_dict: wrap the result in ``ImagePipelineOutput``
        """
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 666 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
    """Slow integration test for the TF XLM-RoBERTa base checkpoint.

    The original bound every intermediate to a throwaway local while reading
    ``model``, ``output`` and ``expected_slice``; the locals are restored
    from those references (dtypes ``tf.int32``/``tf.float32`` restored from
    the mangled ``intaa``/``floataa``).
    """

    @slow
    def UpperCAmelCase__ (self) -> None:
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 666 | from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE_(precision: int):
    """Return pi to ``precision`` significant digits as a string.

    Implements the Chudnovsky series with ``decimal`` arithmetic. The
    original bound every term to one throwaway local while reading
    ``precision``, ``constant_term``, ``linear_term``, ``exponential_term``,
    ``multinomial_term`` and ``partial_sum``; those names are restored, and
    the computed precision is actually applied to the decimal context
    (the original assigned it to a dead local).

    :param precision: number of significant digits, a natural number
    :raises TypeError: if ``precision`` is not an int
    :raises ValueError: if ``precision`` < 1
    """
    # The original tested isinstance(x, x), which itself raises TypeError.
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    # Each series term contributes roughly 14 correct digits.
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the final digit, which may be mis-rounded at context precision.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    # The original printed `pi(n)` with both names undefined; call the
    # function defined above with the configured digit count instead.
    UpperCAmelCase = 50
    print(F"The first {UpperCAmelCase} digits of pi is: {SCREAMING_SNAKE_CASE_(UpperCAmelCase)}")
| 666 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def SCREAMING_SNAKE_CASE_(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list):
    """Ordinary-least-squares prediction of user count from date and match count.

    Fits ``usr ~ 1 + dt + mtch`` via the normal equations and evaluates the
    fit at the first test point. The original declared one parameter name
    five times (a SyntaxError); names are restored from the identifiers the
    body read and from the caller's argument order.

    :return: absolute value of the model's prediction for the test point
    """
    # Design matrix with an intercept column: rows are [1, date, match].
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # beta = (X^T X)^-1 X^T y  (normal-equation OLS solve).
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # NOTE(review): `test_mtch[0] + beta[2]` is kept from the original, but it
    # looks like it should be `test_mtch[0] * beta[2]` -- TODO confirm.
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def SCREAMING_SNAKE_CASE_(train_user: list, train_match: list, test_match: list):
    """Forecast user counts with SARIMAX, using match counts as exogenous data.

    The original declared one parameter name three times (a SyntaxError);
    names are restored from the body's ``test_match`` reference and the
    caller's argument order.

    :return: the first predicted value
    """
    order = (1, 2, 1)
    # Seasonal period of 7 -> weekly seasonality.
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    # NOTE(review): the original passed an ambiguous mangled name as `disp`;
    # `disp=False` (suppress optimizer output) is assumed -- confirm.
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def SCREAMING_SNAKE_CASE_(x_train: list, x_test: list, train_user: list):
    """Predict the next user count with an RBF-kernel support-vector regressor.

    The original declared one parameter name three times (a SyntaxError);
    names are restored from the caller's argument order.

    :return: the first predicted value for ``x_test``
    """
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def SCREAMING_SNAKE_CASE_(train_user: list):
    """Return a lower safety limit: Q1 minus 10% of the inter-quartile range.

    The original bound both percentiles to one throwaway local and then read
    undefined names (``qa``, ``iqr``, ``low_lim``); the quartile variables
    are restored here. Note: sorts ``train_user`` in place, as the original
    did.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - iqr * 0.1
    return low_lim
def SCREAMING_SNAKE_CASE_(list_vote: list, actual_result: float):
    """Majority vote on whether today's observed value looks safe.

    A vote counts as safe when it does not exceed the actual value and its
    magnitude is within 0.1 of the actual value's magnitude; anything else
    counts as unsafe. The original declared one parameter name twice (a
    SyntaxError), never initialised the counters it incremented, and compared
    a value against itself; all three are repaired from the surviving names.

    :param list_vote: forecast values from the individual models
    :param actual_result: today's observed value
    :return: True when safe votes strictly outnumber unsafe ones
    """
    safe = 0
    not_safe = 0
    for vote in list_vote:
        if vote > actual_result:
            # Over-prediction is always treated as unsafe.
            not_safe += 1
        elif abs(abs(vote) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # NOTE(review): every assignment below binds `UpperCAmelCase`, while later
    # lines read distinct names (`data_input`, `data_input_df`, `normalize_df`,
    # `x`, `total_date`, `trn_*`, `tst_*`, `res_vote`, `not_str`) and call
    # helpers not defined under those names in this file -- this driver is a
    # mangled copy and raises NameError as written.
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    UpperCAmelCase = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
    UpperCAmelCase = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    # Scale the feature matrix to unit norm before modelling.
    UpperCAmelCase = Normalizer().fit_transform(data_input_df.values)
    # split data
    UpperCAmelCase = normalize_df[:, 2].tolist()
    UpperCAmelCase = normalize_df[:, 0].tolist()
    UpperCAmelCase = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    UpperCAmelCase = normalize_df[:, [1, 2]].tolist()
    UpperCAmelCase = x[: len(x) - 1]
    UpperCAmelCase = x[len(x) - 1 :]
    # for linear regression & sarimax
    UpperCAmelCase = total_date[: len(total_date) - 1]
    UpperCAmelCase = total_user[: len(total_user) - 1]
    UpperCAmelCase = total_match[: len(total_match) - 1]
    UpperCAmelCase = total_date[len(total_date) - 1 :]
    UpperCAmelCase = total_user[len(total_user) - 1 :]
    UpperCAmelCase = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    UpperCAmelCase = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    UpperCAmelCase = "" if data_safety_checker(res_vote, tst_user) else "not "
    # NOTE(review): the format string below is missing its `f` prefix, so
    # "{not_str}" would print literally even if the names were defined.
    print("Today's data is {not_str}safe.")
def SCREAMING_SNAKE_CASE_(n: int = 1000):
    """Project Euler 25: index of the first Fibonacci number with ``n`` digits.

    The original body read undefined names (``fa``, ``f``, ``i``, ``n``)
    because every assignment was bound to one throwaway local; the Fibonacci
    pair and counters are restored, and the manual digit-counting loop is
    replaced by ``len(str(f))`` (same result).

    :param n: required number of decimal digits (default 1000)
    :return: 1-based index of the first Fibonacci number with ``n`` digits
    """
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        # Stop at the first term whose decimal representation has n digits.
        if len(str(f)) == n:
            break
    return index


if __name__ == "__main__":
    # The original called an undefined `solution`; call the function above.
    print(SCREAMING_SNAKE_CASE_(int(str(input()).strip())))
| 666 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_(yolos_name: str):
    """Build a ``YolosConfig`` for the given checkpoint name.

    Architecture hyper-parameters are keyed off the checkpoint name; COCO
    detection labels are downloaded from the hub and attached. The original
    bound every value to a throwaway local (so the config was returned
    unmodified) and read the undefined names ``yolos_name``, ``idalabel`` and
    ``config``; attribute targets are restored from the upstream conversion
    script, matching the original constants in order.
    """
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    # COCO object detection has 91 label ids.
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def SCREAMING_SNAKE_CASE_(state_dict: dict, config, base_model: bool = False):
    """Split each fused qkv projection in ``state_dict`` into separate
    query/key/value entries, in place.

    The original popped the fused tensors but wrote every slice to a
    throwaway local, so the state dict only lost keys; the target key names
    are restored from the upstream YOLOS conversion script. ``base_model``
    is accepted for interface compatibility and unused, as in the original.

    :param state_dict: checkpoint state dict, modified in place
    :param config: config exposing ``num_hidden_layers`` and ``hidden_size``
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE_(name: str):
    """Translate an original YOLOS parameter name into the 🤗 naming scheme.

    The original declared a parameter the body never used while reading an
    undefined ``name``, and bound every ``str.replace`` result to a throwaway
    local so no substitution took effect; the replacements below are the
    originals, now applied back onto ``name``.
    """
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: dict , lowerCAmelCase_: YolosForObjectDetection ):
    # Re-key an original YOLOS state dict into the 🤗 layout, splitting fused
    # qkv tensors per layer.
    #
    # NOTE(review): the two parameters share one name (a SyntaxError), the
    # body reads `orig_state_dict`, `model`, `key_split`, `layer_num`, `dim`
    # and `val` that are never bound, and each `snake_case_ = ...` discards
    # the slice it should write back under a renamed key -- this is a mangled
    # copy of the upstream conversion helper and needs reconstruction, which
    # also depends on the sibling rename helper whose name collides here.
    for key in orig_state_dict.copy().keys():
        snake_case_ : Optional[int] = orig_state_dict.pop(lowerCAmelCase_ )
        if "qkv" in key:
            snake_case_ : Optional[int] = key.split("." )
            snake_case_ : Dict = int(key_split[2] )
            # per-layer attention width used to slice the fused qkv tensor
            snake_case_ : List[Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                snake_case_ : Union[str, Any] = val[:dim, :]
                snake_case_ : List[str] = val[
                    dim : dim * 2, :
                ]
                snake_case_ : Optional[int] = val[-dim:, :]
            else:
                snake_case_ : Dict = val[:dim]
                snake_case_ : Union[str, Any] = val[dim : dim * 2]
                snake_case_ : Dict = val[-dim:]
        else:
            snake_case_ : List[str] = val
    return orig_state_dict
def SCREAMING_SNAKE_CASE_():
    """Download the standard COCO validation test image and return it as a
    PIL image.

    The original passed an undefined name as ``stream``; ``stream=True`` is
    restored so PIL can read directly from the open response body.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: str , lowerCAmelCase_: str , lowerCAmelCase_: str , lowerCAmelCase_: bool = False ):
    # Convert an original YOLOS checkpoint to the 🤗 format, sanity-check a
    # slice of logits/boxes against per-checkpoint reference values, save the
    # model and image processor, and optionally push both to the hub.
    #
    # NOTE(review): the four parameters share one name (a SyntaxError), the
    # helpers called here (`get_yolos_config`, `convert_state_dict`,
    # `prepare_img`) are not defined under those names in this file, and each
    # `snake_case_ = ...` discards a value that later lines read under names
    # such as `config`, `model`, `yolos_name`, `pytorch_dump_folder_path` and
    # `push_to_hub` -- a mangled copy of the upstream conversion script,
    # documented as-is.
    snake_case_ : Tuple = get_yolos_config(lowerCAmelCase_ )
    # load original state_dict
    snake_case_ : List[str] = torch.load(lowerCAmelCase_ , map_location="cpu" )["model"]
    # load 🤗 model
    snake_case_ : Any = YolosForObjectDetection(lowerCAmelCase_ )
    model.eval()
    snake_case_ : int = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
    model.load_state_dict(lowerCAmelCase_ )
    # Check outputs on an image, prepared by YolosImageProcessor
    snake_case_ : Dict = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
    snake_case_ : int = YolosImageProcessor(format="coco_detection" , size=lowerCAmelCase_ )
    snake_case_ : int = image_processor(images=prepare_img() , return_tensors="pt" )
    snake_case_ : List[str] = model(**lowerCAmelCase_ )
    snake_case_ ,snake_case_ : List[str] = outputs.logits, outputs.pred_boxes
    snake_case_ ,snake_case_ : Optional[int] = None, None
    # Per-checkpoint reference slices used to validate the conversion.
    if yolos_name == "yolos_ti":
        snake_case_ : Optional[Any] = torch.tensor(
            [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
        snake_case_ : Optional[Any] = torch.tensor(
            [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
    elif yolos_name == "yolos_s_200_pre":
        snake_case_ : Tuple = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
        snake_case_ : Optional[Any] = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
    elif yolos_name == "yolos_s_300_pre":
        snake_case_ : Dict = torch.tensor(
            [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
        snake_case_ : Union[str, Any] = torch.tensor(
            [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
    elif yolos_name == "yolos_s_dWr":
        snake_case_ : Union[str, Any] = torch.tensor(
            [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
        snake_case_ : Optional[int] = torch.tensor(
            [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
    elif yolos_name == "yolos_base":
        snake_case_ : List[Any] = torch.tensor(
            [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        snake_case_ : Optional[Any] = torch.tensor(
            [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
    Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(lowerCAmelCase_ )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(lowerCAmelCase_ )
    if push_to_hub:
        # Mapping from internal checkpoint names to hub repository names.
        snake_case_ : int = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        snake_case_ : Optional[Any] = model_mapping[yolos_name]
        image_processor.push_to_hub(lowerCAmelCase_ , organization="hustvl" )
        model.push_to_hub(lowerCAmelCase_ , organization="hustvl" )
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args are both bound to
    # `UpperCAmelCase`, while the lines below read `parser` and `args`, and
    # `convert_yolos_checkpoint` is not defined under that name in this file
    # -- this driver is a mangled copy and raises NameError as written.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    UpperCAmelCase = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 666 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_(nums: list[int | float], left: int, right: int):
    """Divide-and-conquer maximum of ``nums[left:right + 1]``.

    The original declared one parameter name three times (a SyntaxError)
    while the body read ``nums``, and recursed through an undefined
    ``find_max``; both are repaired here.

    :param nums: non-empty list of numbers
    :param left: inclusive lower index (negative indices allowed)
    :param right: inclusive upper index (negative indices allowed)
    :return: largest element inside the window
    :raises ValueError: if ``nums`` is empty
    :raises IndexError: if either bound lies outside the list
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        # Single-element window.
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = SCREAMING_SNAKE_CASE_(nums, left, mid)  # find max in range[left, mid]
    right_max = SCREAMING_SNAKE_CASE_(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 666 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
    """Image preprocessor: resize -> center-crop -> rescale -> normalize, plus
    semantic-segmentation post-processing.

    NOTE(review): every method below is named ``UpperCAmelCase__`` (so later
    definitions shadow earlier ones), each signature repeats the parameter
    name ``A__`` (a SyntaxError), and many ``snake_case_ = ...`` assignments
    discard values that following lines read under their real names
    (``size``, ``images``, ``logits``, ...) -- this class is a mangled copy
    and is documented here as-is.
    """

    # Key emitted in the BatchFeature output.
    _SCREAMING_SNAKE_CASE : str = ["pixel_values"]
    def __init__( self : List[Any] , A__ : bool = True , A__ : Optional[Dict[str, int]] = None , A__ : PILImageResampling = PILImageResampling.BILINEAR , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , **A__ : int , ) -> None:
        '''Record the default resize/crop/rescale/normalize configuration.'''
        super().__init__(**A__ )
        snake_case_ : Optional[int] = size if size is not None else {"shortest_edge": 2_56}
        snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        snake_case_ : Any = get_size_dict(A__ , param_name="crop_size" )
        snake_case_ : int = do_resize
        snake_case_ : Optional[Any] = size
        snake_case_ : Optional[Any] = resample
        snake_case_ : Optional[int] = do_center_crop
        snake_case_ : List[Any] = crop_size
        snake_case_ : List[Any] = do_rescale
        snake_case_ : Optional[int] = rescale_factor
        snake_case_ : Optional[Any] = do_normalize
        snake_case_ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : str , ) -> np.ndarray:
        '''Resize so the shortest edge matches ``size["shortest_edge"]``.'''
        snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        snake_case_ : Any = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
        return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : int , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Union[str, Any] , ) -> np.ndarray:
        '''Center-crop to ``size["height"]`` x ``size["width"]``.'''
        snake_case_ : Tuple = get_size_dict(A__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : float , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Tuple ) -> np.ndarray:
        '''Multiply pixel values by a scale factor (e.g. 1/255).'''
        return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Dict , ) -> np.ndarray:
        '''Normalize with per-channel mean and standard deviation.'''
        return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : Union[str, Any] , A__ : ImageInput , A__ : Optional[bool] = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : Dict[str, int] = None , A__ : Optional[bool] = None , A__ : Optional[float] = None , A__ : Optional[bool] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A__ : Union[str, Any] , ) -> Optional[int]:
        '''Run the full preprocessing pipeline and return a BatchFeature.'''
        # Fall back to the instance defaults for any unspecified option.
        snake_case_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Dict = size if size is not None else self.size
        snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : Tuple = resample if resample is not None else self.resample
        snake_case_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : str = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" )
        snake_case_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : List[str] = image_std if image_std is not None else self.image_std
        snake_case_ : Dict = make_list_of_images(A__ )
        if not valid_images(A__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Validate that every enabled step has its required parameters.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        snake_case_ : Tuple = [to_numpy_array(A__ ) for image in images]
        if do_resize:
            snake_case_ : Any = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
        if do_center_crop:
            snake_case_ : List[str] = [self.center_crop(image=A__ , size=A__ ) for image in images]
        if do_rescale:
            snake_case_ : Any = [self.rescale(image=A__ , scale=A__ ) for image in images]
        if do_normalize:
            snake_case_ : Union[str, Any] = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
        snake_case_ : Optional[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
        snake_case_ : Any = {"pixel_values": images}
        return BatchFeature(data=A__ , tensor_type=A__ )
    def UpperCAmelCase__ ( self : List[str] , A__ : Dict , A__ : List[Tuple] = None ) -> Union[str, Any]:
        '''Convert model logits into per-image semantic segmentation maps.'''
        snake_case_ : Tuple = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(A__ ) != len(A__ ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(A__ ):
                snake_case_ : Dict = target_sizes.numpy()
            snake_case_ : int = []
            for idx in range(len(A__ ) ):
                # Upsample each logit map to its target size, then argmax over classes.
                snake_case_ : List[str] = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A__ )
                snake_case_ : int = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(A__ )
        else:
            snake_case_ : List[Any] = logits.argmax(dim=1 )
            snake_case_ : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 666 | import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer
def __init__( self : Optional[int] , A__ : List[Any]=None , A__ : Optional[int]=None , A__ : List[str]=None , A__ : Dict="replace" , A__ : List[str]="<s>" , A__ : Optional[Any]="</s>" , A__ : List[str]="</s>" , A__ : List[Any]="<s>" , A__ : int="<unk>" , A__ : int="<pad>" , A__ : List[Any]="<mask>" , A__ : Any=False , A__ : Optional[int]=True , **A__ : Union[str, Any] , ) -> int:
'''simple docstring'''
super().__init__(
A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : List[Any] = getattr(A__ , pre_tok_state.pop("type" ) )
snake_case_ : Any = add_prefix_space
snake_case_ : List[Any] = pre_tok_class(**A__ )
snake_case_ : Optional[int] = add_prefix_space
snake_case_ : List[str] = "post_processor"
snake_case_ : Tuple = getattr(self.backend_tokenizer , A__ , A__ )
if tokenizer_component_instance:
snake_case_ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : str = tuple(state["sep"] )
if "cls" in state:
snake_case_ : Tuple = tuple(state["cls"] )
snake_case_ : Tuple = False
if state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : Optional[Any] = add_prefix_space
snake_case_ : str = True
if state.get("trim_offsets" , A__ ) != trim_offsets:
snake_case_ : Optional[int] = trim_offsets
snake_case_ : List[Any] = True
if changes_to_apply:
snake_case_ : int = getattr(A__ , state.pop("type" ) )
snake_case_ : List[Any] = component_class(**A__ )
setattr(self.backend_tokenizer , A__ , A__ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Tuple , A__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
snake_case_ : Any = value
def UpperCAmelCase__ ( self : int , *A__ : Optional[Any] , **A__ : int ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[Any] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Union[str, Any] , *A__ : Any , **A__ : List[Any] ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[int] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def UpperCAmelCase__ ( self : int , A__ : List[str] , A__ : Union[str, Any]=None ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 666 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( tmp_path: Dict ):
    """Write a small well-formed CSV file and return its path as a string.

    NOTE(review): parameter renamed to `tmp_path` — the body already relied on
    pytest's `tmp_path` fixture — and the CSV text (not the path object) is what
    gets written to the file.
    """
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( tmp_path: Dict ):
    """Write a malformed CSV (ragged trailing comma) and return its path.

    NOTE(review): parameter renamed to `tmp_path` (pytest fixture the body used)
    and the CSV text — not the path object — is written to the file.
    """
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        "\\n header1,header2\n 1,2\n 10,20,\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( tmp_path: int , image_file: int ):
    """Write a one-column CSV whose single row is an image path; return its path.

    NOTE(review): the original reused one parameter name twice (a SyntaxError);
    restored `tmp_path` + `image_file` (the f-string already read `image_file`).
    """
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"\\n image\n {image_file}\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( tmp_path: List[Any] ):
    """Write a one-column CSV of class labels ("good"/"bad") and return its path.

    NOTE(review): parameter renamed to `tmp_path` (pytest fixture the body used)
    and the CSV text — not the path object — is written to the file.
    """
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        "\\n label\n good\n bad\n good\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( tmp_path: Optional[int] ):
    """Write a one-column CSV of space-separated int lists and return its path.

    NOTE(review): parameter renamed to `tmp_path` (pytest fixture the body used)
    and the CSV text — not the path object — is written to the file.
    """
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        "\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
    with open(filename , "w" ) as f:
        f.write(data )
    return str(filename )
def SCREAMING_SNAKE_CASE_ ( csv_file: List[str] , malformed_csv_file: Optional[int] , caplog: int ):
    """A malformed CSV must raise while generating tables and log which file failed.

    NOTE(review): the original reused one parameter name three times (a SyntaxError)
    and passed an undefined name to `pytest.raises`; `ValueError` is what the
    underlying pandas tokenizing failure surfaces as — confirm against the
    upstream `datasets` test.
    """
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match="Error tokenizing data" ):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def SCREAMING_SNAKE_CASE_ ( csv_file_with_image: Tuple ):
    """Casting a CSV column to `Image` should yield path/bytes image dicts.

    NOTE(review): restored the parameter/local names the body already read
    (`csv_file_with_image`, `image_file`, `pa_table`, `generated_content`).
    """
    with open(csv_file_with_image , encoding="utf-8" ) as f:
        # Second line of the CSV is the image path written by the fixture.
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    # Feature objects are callable and return their pyarrow storage type.
    assert pa_table.schema.field("image" ).type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def SCREAMING_SNAKE_CASE_ ( csv_file_with_label: str ):
    """Casting a CSV column to `ClassLabel` should yield integer label ids.

    NOTE(review): restored the names the body already read and fixed the mangled
    method call `.straint(...)` → `ClassLabel.str2int(label)` with the loop's own
    variable.
    """
    with open(csv_file_with_label , encoding="utf-8" ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    # Feature objects are callable and return their pyarrow storage type.
    assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def SCREAMING_SNAKE_CASE_ ( csv_file_with_int_list: List[str] ):
    """A converter splitting "1 2 3" into ints should produce a pyarrow list column.

    NOTE(review): the converter lambda was mangled (`lambda lowerCAmelCase_: [int(lowerCAmelCase_)
    for i in x.split()]`); restored to split the cell and convert each token.
    """
    csv = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 666 | from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
    """Slow integration test for the TF XLM-RoBERTa base checkpoint."""

    @slow
    def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
        """Check output shape and a 3x3 hidden-state slice against reference values.

        NOTE(review): restored `tf.int32`/`tf.float32` (the chunk carried the
        nonexistent `tf.intaa`/`tf.floataa`) and bound results to the names the
        assertions actually read (`output`, `expected_shape`, `expected_slice`).
        """
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
        features = {
            "input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(features )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 666 | 1 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case__ :
    """Small from-scratch CNN: one convolution layer, one pooling layer, and a
    two-layer fully-connected network, trained with hand-written backprop.
    Includes pickle-based save/load and a matplotlib training-error plot.

    NOTE(review): this chunk is machine-mangled — computed values are bound to
    throwaway `snake_case_` locals while later lines read the original
    identifiers (e.g. `bp_numa`, `conv_get`, `featuremap`, `rp`, `mse`), and
    several signatures reuse the parameter name `A__` (a SyntaxError). It is
    documented as-is rather than rewritten.
    """

    def __init__( self : Union[str, Any] , A__ : Optional[Any] , A__ : Optional[Any] , A__ : Dict , A__ : Union[str, Any] , A__ : Union[str, Any] , A__ : Optional[Any]=0.2 , A__ : Any=0.2 ) -> int:
        '''Record layer sizes, conv/pooling hyper-parameters and learning rates,
        then initialize kernels, weights and thresholds uniformly in (-0.5, 0.5)
        / (-1, 1).'''
        # NOTE(review): RHS names (bp_numa, conva_get, size_pa, rate_w, rate_t)
        # are unresolved and the stores never reach `self.*`.
        snake_case_ : Union[str, Any] = bp_numa
        snake_case_ : str = bp_numa
        snake_case_ : Optional[int] = bp_numa
        snake_case_ : Optional[int] = conva_get[:2]
        snake_case_ : Any = conva_get[2]
        snake_case_ : Optional[Any] = size_pa
        snake_case_ : Optional[Any] = rate_w
        snake_case_ : Union[str, Any] = rate_t
        # One random kernel per convolution output map.
        snake_case_ : List[Any] = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        snake_case_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        snake_case_ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
        snake_case_ : Optional[int] = -2 * np.random.rand(self.conva[1] ) + 1
        snake_case_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1
        snake_case_ : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1

    def UpperCAmelCase__ ( self : Optional[Any] , A__ : Optional[int] ) -> Optional[int]:
        '''Pickle all weights, thresholds and hyper-parameters to the given path.'''
        snake_case_ : Any = {
            "num_bp1": self.num_bpa,
            "num_bp2": self.num_bpa,
            "num_bp3": self.num_bpa,
            "conv1": self.conva,
            "step_conv1": self.step_conva,
            "size_pooling1": self.size_poolinga,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conva,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conva,
            "thre_bp2": self.thre_bpa,
            "thre_bp3": self.thre_bpa,
        }
        with open(A__ , "wb" ) as f:
            pickle.dump(A__ , A__ )
        print(f"Model saved: {save_path}" )

    @classmethod
    def UpperCAmelCase__ ( cls : Optional[Any] , A__ : Tuple ) -> Optional[int]:
        '''Rebuild a CNN instance from a pickle written by the save method above.'''
        with open(A__ , "rb" ) as f:
            # Unpickling is only safe on trusted files (hence the S301 waiver).
            snake_case_ : Tuple = pickle.load(A__ )  # noqa: S301
        snake_case_ : Any = model_dic.get("conv1" )
        conv_get.append(model_dic.get("step_conv1" ) )
        snake_case_ : Any = model_dic.get("size_pooling1" )
        snake_case_ : Optional[Any] = model_dic.get("num_bp1" )
        snake_case_ : int = model_dic.get("num_bp2" )
        snake_case_ : Tuple = model_dic.get("num_bp3" )
        snake_case_ : int = model_dic.get("rate_weight" )
        snake_case_ : Tuple = model_dic.get("rate_thre" )
        # create model instance
        snake_case_ : Optional[int] = CNN(A__ , A__ , A__ , A__ , A__ , A__ , A__ )
        # modify model parameter
        snake_case_ : Any = model_dic.get("w_conv1" )
        snake_case_ : Dict = model_dic.get("wkj" )
        snake_case_ : int = model_dic.get("vji" )
        snake_case_ : str = model_dic.get("thre_conv1" )
        snake_case_ : str = model_dic.get("thre_bp2" )
        snake_case_ : Optional[Any] = model_dic.get("thre_bp3" )
        return conv_ins

    def UpperCAmelCase__ ( self : List[Any] , A__ : Tuple ) -> Optional[int]:
        '''Sigmoid activation. NOTE(review): body reads undefined `x` instead of the parameter.'''
        return 1 / (1 + np.exp(-1 * x ))

    def UpperCAmelCase__ ( self : Any , A__ : Tuple ) -> Tuple:
        '''Round a value to three decimal places (used when reporting predictions).'''
        return round(A__ , 3 )

    def UpperCAmelCase__ ( self : str , A__ : Any , A__ : Tuple , A__ : Dict , A__ : Union[str, Any] , A__ : List[Any] ) -> int:
        '''Convolve the input with each kernel: slice the image into windows,
        apply each kernel + threshold + sigmoid, and return the flattened
        window list together with the resulting feature maps.'''
        snake_case_ : Optional[int] = convs[0]
        snake_case_ : List[str] = convs[1]
        snake_case_ : Any = np.shape(A__ )[0]
        # get the data slice of original image data, data_focus
        snake_case_ : List[str] = []
        for i_focus in range(0 , size_data - size_conv + 1 , A__ ):
            for j_focus in range(0 , size_data - size_conv + 1 , A__ ):
                snake_case_ : List[str] = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(A__ )
        # calculate the feature map of every single kernel, and saved as list of matrix
        snake_case_ : str = []
        snake_case_ : Any = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(A__ ):
            snake_case_ : Optional[int] = []
            for i_focus in range(len(A__ ) ):
                snake_case_ : Any = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(A__ ) )
            snake_case_ : Tuple = np.asmatrix(A__ ).reshape(
                A__ , A__ )
            data_featuremap.append(A__ )
        # expanding the data slice to One dimenssion
        snake_case_ : str = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(A__ ) )
        snake_case_ : Tuple = np.asarray(A__ )
        return focus_list, data_featuremap

    def UpperCAmelCase__ ( self : str , A__ : Dict , A__ : Dict , A__ : Union[str, Any]="average_pool" ) -> Any:
        '''Pool each feature map with non-overlapping windows, using average or
        max pooling according to `pooling_type`.'''
        snake_case_ : Dict = len(featuremaps[0] )
        snake_case_ : Optional[Any] = int(size_map / size_pooling )
        snake_case_ : str = []
        for i_map in range(len(A__ ) ):
            snake_case_ : Dict = featuremaps[i_map]
            snake_case_ : List[str] = []
            for i_focus in range(0 , A__ , A__ ):
                for j_focus in range(0 , A__ , A__ ):
                    snake_case_ : Tuple = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(A__ ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(A__ ) )
            snake_case_ : List[Any] = np.asmatrix(A__ ).reshape(A__ , A__ )
            featuremap_pooled.append(A__ )
        return featuremap_pooled

    def UpperCAmelCase__ ( self : List[str] , A__ : Optional[int] ) -> Tuple:
        '''Flatten a list of matrices into a single 1-D numpy array.'''
        snake_case_ : str = []
        for i in range(len(A__ ) ):
            snake_case_ : str = np.shape(data[i] )
            snake_case_ : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
            snake_case_ : Dict = data_listed.getA().tolist()[0]
            data_expanded.extend(A__ )
        snake_case_ : Tuple = np.asarray(A__ )
        return data_expanded

    def UpperCAmelCase__ ( self : str , A__ : List[str] ) -> Optional[int]:
        '''Flatten a single matrix into a 1-row matrix.'''
        snake_case_ : List[str] = np.asarray(A__ )
        snake_case_ : int = np.shape(A__ )
        snake_case_ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded

    def UpperCAmelCase__ ( self : Optional[int] , A__ : Dict , A__ : Union[str, Any] , A__ : List[str] , A__ : Union[str, Any] , A__ : Optional[int] ) -> Optional[int]:
        '''Up-sample the pooled gradient back to feature-map size and multiply by
        the sigmoid derivative of each output map.'''
        snake_case_ : int = []
        snake_case_ : Optional[Any] = 0
        for i_map in range(A__ ):
            snake_case_ : int = np.ones((size_map, size_map) )
            for i in range(0 , A__ , A__ ):
                for j in range(0 , A__ , A__ ):
                    snake_case_ : Union[str, Any] = pd_pool[
                        i_pool
                    ]
                    snake_case_ : Union[str, Any] = i_pool + 1
            snake_case_ : List[Any] = np.multiply(
                A__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(A__ )
        return pd_all

    def UpperCAmelCase__ ( self : Union[str, Any] , A__ : Optional[int] , A__ : Any , A__ : str , A__ : Optional[int] , A__ : int , A__ : int=bool ) -> int:
        '''Train until `n_repeat` epochs or the MSE drops below `error_accuracy`:
        forward pass (conv -> pool -> MLP), backprop of conv and dense weights,
        and an optional error plot at the end. Returns the final MSE.'''
        print("----------------------Start Training-------------------------" )
        print((" - - Shape: Train_Data ", np.shape(A__ )) )
        print((" - - Shape: Teach_Data ", np.shape(A__ )) )
        snake_case_ : Any = 0
        snake_case_ : List[Any] = []
        snake_case_ : Tuple = 1_00_00
        while rp < n_repeat and mse >= error_accuracy:
            snake_case_ : List[Any] = 0
            print(f"-------------Learning Time {rp}--------------" )
            for p in range(len(A__ ) ):
                # print('------------Learning Image: %d--------------'%p)
                snake_case_ : List[Any] = np.asmatrix(datas_train[p] )
                snake_case_ : int = np.asarray(datas_teach[p] )
                snake_case_ ,snake_case_ : Dict = self.convolute(
                    A__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                snake_case_ : Any = self.pooling(A__ , self.size_poolinga )
                snake_case_ : Union[str, Any] = np.shape(A__ )
                snake_case_ : int = self._expand(A__ )
                snake_case_ : Any = data_bp_input
                snake_case_ : Any = np.dot(A__ , self.vji.T ) - self.thre_bpa
                snake_case_ : Optional[Any] = self.sig(A__ )
                snake_case_ : str = np.dot(A__ , self.wkj.T ) - self.thre_bpa
                snake_case_ : Union[str, Any] = self.sig(A__ )
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                snake_case_ : List[str] = np.multiply(
                    (data_teach - bp_outa) , np.multiply(A__ , (1 - bp_outa) ) )
                snake_case_ : Optional[int] = np.multiply(
                    np.dot(A__ , self.wkj ) , np.multiply(A__ , (1 - bp_outa) ) )
                snake_case_ : Any = np.dot(A__ , self.vji )
                snake_case_ : Optional[int] = pd_i_all / (self.size_poolinga * self.size_poolinga)
                snake_case_ : List[str] = pd_conva_pooled.T.getA().tolist()
                snake_case_ : int = self._calculate_gradient_from_pool(
                    A__ , A__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    snake_case_ : List[Any] = self._expand_mat(pd_conva_all[k_conv] )
                    snake_case_ : str = self.rate_weight * np.dot(A__ , A__ )
                    snake_case_ : Dict = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    snake_case_ : Tuple = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                snake_case_ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
                snake_case_ : Optional[int] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                snake_case_ : str = self.thre_bpa - pd_k_all * self.rate_thre
                snake_case_ : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                snake_case_ : Tuple = np.sum(abs(data_teach - bp_outa ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            snake_case_ : Any = rp + 1
            snake_case_ : List[Any] = error_count / patterns
            all_mse.append(A__ )

        # Plot the per-epoch MSE against the target accuracy line.
        def draw_error():
            snake_case_ : Any = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(A__ , "+-" )
            plt.plot(A__ , "r--" )
            plt.xlabel("Learning Times" )
            plt.ylabel("All_mse" )
            plt.grid(A__ , alpha=0.5 )
            plt.show()

        print("------------------Training Complished---------------------" )
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}") )
        if draw_e:
            draw_error()
        return mse

    def UpperCAmelCase__ ( self : Tuple , A__ : Optional[int] ) -> Union[str, Any]:
        '''Run the forward pass on each test sample and return the rounded
        network outputs as a numpy array.'''
        snake_case_ : Dict = []
        print("-------------------Start Testing-------------------------" )
        print((" - - Shape: Test_Data ", np.shape(A__ )) )
        for p in range(len(A__ ) ):
            snake_case_ : Tuple = np.asmatrix(datas_test[p] )
            snake_case_ ,snake_case_ : Optional[Any] = self.convolute(
                A__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            snake_case_ : Tuple = self.pooling(A__ , self.size_poolinga )
            snake_case_ : int = self._expand(A__ )
            snake_case_ : List[str] = data_bp_input
            snake_case_ : int = bp_outa * self.vji.T - self.thre_bpa
            snake_case_ : Optional[int] = self.sig(A__ )
            snake_case_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa
            snake_case_ : Optional[Any] = self.sig(A__ )
            produce_out.extend(bp_outa.getA().tolist() )
        snake_case_ : Any = [list(map(self.do_round , A__ ) ) for each in produce_out]
        return np.asarray(A__ )

    def UpperCAmelCase__ ( self : Optional[Any] , A__ : Dict ) -> List[Any]:
        '''Return the convolution and pooling results for a single input image.'''
        snake_case_ : Any = np.asmatrix(A__ )
        snake_case_ ,snake_case_ : Optional[Any] = self.convolute(
            A__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        snake_case_ : Optional[int] = self.pooling(A__ , self.size_poolinga )
        return data_conveda, data_pooleda
if __name__ == "__main__":
    # Library-style module: nothing to run directly.
    pass
| 666 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger (bound to an obfuscated name; nothing in this chunk reads it).
UpperCAmelCase = logging.get_logger(__name__)
# PIL is only imported when the vision extras are installed.
if is_vision_available():
    import PIL
class snake_case__ ( _UpperCamelCase ):
    """CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale, and mean/std normalization, returning a BatchFeature
    of "pixel_values".

    NOTE(review): this chunk is machine-mangled — every signature reuses the
    parameter name `A__` (duplicate names are a SyntaxError) and the `__init__`
    stores land in throwaway `snake_case_` locals while the methods read
    `self.do_resize` etc.; documented as-is rather than rewritten.
    """

    _SCREAMING_SNAKE_CASE : Dict = ["pixel_values"]

    def __init__( self : Union[str, Any] , A__ : bool = True , A__ : Dict[str, int] = None , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = True , **A__ : Optional[int] , ) -> None:
        '''Record the default preprocessing configuration (resize/crop/rescale/
        normalize flags, sizes, resample filter, image mean/std, RGB conversion).'''
        super().__init__(**A__ )
        # Defaults mirror the OpenAI CLIP preprocessing (224 shortest edge / crop).
        snake_case_ : str = size if size is not None else {"shortest_edge": 2_24}
        snake_case_ : Union[str, Any] = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ , param_name="crop_size" )
        snake_case_ : str = do_resize
        snake_case_ : str = size
        snake_case_ : Optional[Any] = resample
        snake_case_ : Any = do_center_crop
        snake_case_ : Any = crop_size
        snake_case_ : str = do_rescale
        snake_case_ : Optional[Any] = rescale_factor
        snake_case_ : int = do_normalize
        snake_case_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        snake_case_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
        snake_case_ : int = do_convert_rgb

    def UpperCAmelCase__ ( self : Optional[int] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> np.ndarray:
        '''Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio.'''
        snake_case_ : str = get_size_dict(A__ , default_to_square=A__ )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        snake_case_ : str = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
        return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[Any] , ) -> np.ndarray:
        '''Center-crop to `size["height"] x size["width"]`.'''
        snake_case_ : Optional[int] = get_size_dict(A__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : Optional[Any] , A__ : np.ndarray , A__ : Union[int, float] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> str:
        '''Multiply pixel values by `scale` (typically 1/255).'''
        return rescale(A__ , scale=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : Any , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Any , ) -> np.ndarray:
        '''Normalize per channel with the given mean and std.'''
        return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )

    def UpperCAmelCase__ ( self : List[Any] , A__ : ImageInput , A__ : bool = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : int = None , A__ : bool = None , A__ : float = None , A__ : bool = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **A__ : Optional[Any] , ) -> PIL.Image.Image:
        '''Run the configured pipeline (convert RGB -> resize -> crop -> rescale ->
        normalize -> channel reorder) over one image or a list of images; each
        per-call argument overrides the instance default when not None.'''
        snake_case_ : List[Any] = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Union[str, Any] = size if size is not None else self.size
        snake_case_ : Any = get_size_dict(A__ , param_name="size" , default_to_square=A__ )
        snake_case_ : Optional[int] = resample if resample is not None else self.resample
        snake_case_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : List[str] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" , default_to_square=A__ )
        snake_case_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : List[str] = image_std if image_std is not None else self.image_std
        snake_case_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        snake_case_ : List[Any] = make_list_of_images(A__ )
        if not valid_images(A__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Each enabled step must have its configuration present.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            snake_case_ : Dict = [convert_to_rgb(A__ ) for image in images]
        # All transformations expect numpy arrays.
        snake_case_ : Dict = [to_numpy_array(A__ ) for image in images]
        if do_resize:
            snake_case_ : Dict = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
        if do_center_crop:
            snake_case_ : Tuple = [self.center_crop(image=A__ , size=A__ ) for image in images]
        if do_rescale:
            snake_case_ : str = [self.rescale(image=A__ , scale=A__ ) for image in images]
        if do_normalize:
            snake_case_ : int = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
        snake_case_ : List[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
        snake_case_ : Tuple = {"pixel_values": images}
        return BatchFeature(data=A__ , tensor_type=A__ )
| 666 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the Megatron-BERT sub-module.
# NOTE(review): the chunk bound this dict (and later the _LazyModule instance) to
# throwaway names while the bottom line read `_import_structure`; the canonical
# lazy-module wiring is restored here.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( position: tuple[int, int] , n: int ):
    """Return the knight moves from `position` that stay on an n x n board.

    NOTE(review): the original reused one parameter name twice (a SyntaxError);
    names reconstructed from the identifiers the body already read.
    """
    y , x = position
    # All eight L-shaped knight displacements.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test , x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate )
    return permissible_positions
return permissible_positions
def SCREAMING_SNAKE_CASE_ ( board: list[list[int]] ):
    """Return True when no cell of `board` is still 0 (i.e. the tour is complete).

    NOTE(review): the original body read an undefined name `board` while the
    parameter was called `lowerCAmelCase_`; the parameter is now the name the
    body uses.
    """
    return not any(elem == 0 for row in board for elem in row )
def SCREAMING_SNAKE_CASE_ ( board: list[list[int]] , pos: tuple[int, int] , curr: int ):
    """Backtracking step of the open knight's tour: try each legal move, write the
    move number into the square, recurse, and undo on failure.

    NOTE(review): duplicate parameter names were split and the move number is now
    written back into the visited square (the original discarded it into a
    throwaway local). The calls to `is_complete` / `get_valid_pos` /
    `open_knight_tour_helper` were left as the chunk had them, but the
    obfuscation renamed every sibling to one shared name, so they do not resolve
    here — confirm against the upstream module.
    """
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            # Undo the move before trying the next candidate.
            board[y][x] = 0
    return False
def SCREAMING_SNAKE_CASE_ ( n: int ):
    """Return a completed open knight's tour board of size n, or raise ValueError.

    NOTE(review): restored the board writes (`board[i][j] = 1` / `= 0`) that the
    chunk had discarded into throwaway locals; the call to
    `open_knight_tour_helper` depends on the sibling helper, which the
    obfuscation left unresolved — confirm against the upstream module.
    """
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = f"Open Kight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed directly.
    doctest.testmod()
| 666 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( nums: list[int | float] , left: int , right: int ):
    """Recursively find the maximum of nums[left..right] (inclusive) by halving.

    Raises ValueError on an empty sequence and IndexError when either bound is
    outside the valid (possibly negative) index range.

    NOTE(review): the original reused one parameter name three times (a
    SyntaxError) and recursed through an undefined name `find_max`; both
    repaired, logic unchanged.
    """
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = SCREAMING_SNAKE_CASE_(nums , left , mid )  # find max in range[left, mid]
    right_max = SCREAMING_SNAKE_CASE_(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
    import doctest

    # Verbose doctest run when executed as a script.
    doctest.testmod(verbose=True)
| 666 | from ...configuration_utils import PretrainedConfig
class snake_case__ ( _UpperCamelCase ):
    """Configuration for BertGeneration ("bert-generation") models.

    NOTE(review): the original `__init__` reused `A__` for every parameter (a
    SyntaxError) and discarded each value into throwaway locals, so no attribute
    was ever stored. Parameter names and attribute stores are reconstructed from
    the right-hand-side identifiers the body already read; defaults keep the
    original positional values.
    """

    _SCREAMING_SNAKE_CASE : Union[str, Any] = "bert-generation"

    def __init__(
        self,
        vocab_size=5_03_58,
        hidden_size=10_24,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=40_96,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ) -> None:
        # Special-token ids are forwarded to the base configuration class.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 666 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# NOTE(review): the chunk assigned both constants to one throwaway name while the
# functions below read `MODEL_TYPE` / `LOAD_DENSE_INDEX`; restored those names.
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def SCREAMING_SNAKE_CASE_ ( ):
    """Load the retrieval (retribert) and generation (BART or T5) models, cached
    by streamlit.

    NOTE(review): restored the local names each branch's tail already read
    (`qar_model`, `sas_model`, `save_dict`, ...) and the `True` cache flag the
    decorator referenced through an undefined name.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer , qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
        sas_model.load_state_dict(save_dict["model"] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer , sas_model = make_qa_sas_model(
            model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load (and cache) the retrieval back-ends.

    Returns:
        (wikiaab_passages, wikiaab_gpu_index_flat, es_client): the Wiki40b
        passage dataset and its GPU FAISS index (both ``None`` when
        ``LOAD_DENSE_INDEX`` is False), plus an ElasticSearch client for the
        sparse retriever.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wikiaab_passages.num_rows, 128),
        )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        # Index lives on GPU 1.
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load (and cache) the ELI5 training split and a FAISS index over its
    pre-computed question embeddings (``eli5_questions_reps.dat``).

    Returns:
        (elia_train, eli5_train_q_index)
    """
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    elia_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
# Module-level resources shared by the retrieval/generation helpers below.
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples whose questions embed
    closest (max inner product) to `question`.
    """
    # NOTE(review): argument order assumed to be (questions, tokenizer, model)
    # per elia_utils.embed_questions_for_retrieval -- confirm.
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve support passages for `question`.

    Args:
        question: the user question.
        source: "wiki40b" to retrieve, "none" for an empty context.
        method: "dense" (FAISS) or anything else for the ElasticSearch index.
        n_results: number of passages to fetch.

    Returns:
        (question_doc, support_list): the "question: ... context: ..." string
        fed to the generator, and a list of
        (article_title, section_title, score, passage_text) tuples.
    """
    if source == "none":
        # Empty context: ten blank <P>-separated slots, no hits.
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            # NOTE(review): argument order assumed per
            # elia_utils.query_qa_dense_index(question, model, tokenizer,
            # passages, index, n_results) -- confirm.
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _tensor: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _tokenizer: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one answer for `question_doc` with the seq2seq model.

    Returns:
        (answer, support_list). NOTE(review): `support_list` is not defined in
        this function -- it is read from module scope (set in the button
        handler below). This mirrors the original behavior but looks like a
        latent bug; confirm before relying on the second element.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    # Defaults: "Show me everything" with full passage text.
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults (overridden by the sidebar widgets below).
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Merge and de-duplicate dense and sparse hits, keep the top 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        # Keep the first answer plus any answer scored above 2.
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 666 | import math
def sieve(n: int) -> list[int]:
    """Return all primes <= `n` using a segmented Sieve of Eratosthenes.

    Base primes up to sqrt(n) are found with a classic sieve; the range
    (sqrt(n), n] is then processed in segments of width ~sqrt(n), so memory
    stays O(sqrt(n)) instead of O(n).

    Raises:
        ValueError: if `n` is negative (from math.sqrt).
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []  # base primes <= sqrt(n)
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside [low, high].
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
| 666 | 1 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCAmelCase = False
logger = logging.get_logger(__name__)
# Hub repo holding the fallback TTF font used when rendering VQA header text.
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    """Raise ImportError when torch is installed but older than 1.11.0 (the
    minimum this processor's unfold-based patch extraction requires)."""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from a (C, H, W) image tensor.

    Returns a tensor of shape
    (1, H // patch_height, W // patch_width, C * patch_height * patch_width).
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(
        image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width)
    )
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> "Image.Image":
    """Render `text` as a PIL image, word-wrapped at 80 characters.

    The font comes from `font_bytes` (in-memory TTF), `font_path`, or, as a
    fallback, Arial.TTF downloaded from the "ybelkada/fonts" Hub repo.
    """
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download("ybelkada/fonts", "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    """Render `header` as text and stack it on top of `image`.

    Both parts are resized to a common width; extra keyword arguments are
    forwarded to `render_text`. Returns the combined picture as a numpy array.
    """
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor that converts images into sequences of flattened
    patches ("flattened_patches") plus an attention mask, as consumed by
    Pix2Struct models. In VQA mode a rendered header text is stacked on top
    of each image before patch extraction.
    """

    # NOTE(review): attribute name per BaseImageProcessor convention -- confirm.
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        """Resize `image` to the largest patch grid fitting in `max_patches`,
        cut it into patches, prepend 1-based (row, col) ids to each patch, and
        zero-pad the sequence to exactly `max_patches` rows.
        """
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. rows * cols <= max_patches while keeping aspect ratio
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Per-image standardization: subtract the image mean and divide by the
        image std, with the std floored at 1/sqrt(num_pixels) (as in
        tf.image.per_image_standardization).
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """Preprocess one image or a batch into a BatchFeature with
        "flattened_patches" and "attention_mask".

        Raises:
            ValueError: on invalid image types, when `data_format` is passed
                as a kwarg, or when `is_vqa` is set but `header_text` is None.
        """
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy: a patch row is "real" iff it is not all-zero padding
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 666 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Fixture builder: holds image-processor kwargs and computes the
    output sizes the processor is expected to produce."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # None-defaults instead of mutable list/dict defaults (shared-state bug).
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict for instantiating the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For `batched=True`, returns the per-dimension maxima across the batch
        (matching pad-to-largest behavior).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # Image-processor class under test; None when vision deps are unavailable.
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """Create the shared tester fixture used by every test below."""
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
@property
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , "image_mean" ) )
self.assertTrue(hasattr(A__ , "image_std" ) )
self.assertTrue(hasattr(A__ , "do_normalize" ) )
self.assertTrue(hasattr(A__ , "do_resize" ) )
self.assertTrue(hasattr(A__ , "size" ) )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , A__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , A__ )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : int ) -> Any:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : Optional[Any] = json.loads(f.read() )
snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
# verify pixel values
snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , A__ )
snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
# verify area
snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
# verify boxes
snake_case_ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
# verify is_crowd
snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
# verify class_labels
snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
# verify size
snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
    @slow
    def UpperCAmelCase__ ( self : int ) -> str:
        '''Integration test for the COCO-panoptic path of
        ConditionalDetrImageProcessor: encodes a fixture image plus panoptic
        annotation and checks pixel values and every target field against
        reference values.

        NOTE(review): every local below is assigned to the throwaway name
        ``snake_case_`` while later lines read ``target`` and pass ``A__`` to
        the assertions — this looks machine-renamed (originally ``target``,
        ``masks_path``, ``image_processing``, ``encoding``, ``expected_*``)
        and would raise NameError if executed; verify against the upstream
        test before relying on it.
        '''
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Any = json.loads(f.read() )
        snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify masks
        snake_case_ : Union[str, Any] = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
        # verify orig_size
        snake_case_ : Dict = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 1 |
import mpmath # for roots of unity
import numpy as np
class snake_case__ :
    """Fast polynomial multiplication using an iterative radix-2 FFT.

    Polynomials are coefficient lists, lowest degree first.  The product of
    ``poly_a`` and ``poly_b`` is computed in ``O(n log n)`` and exposed as
    ``self.product`` (complex coefficients rounded to 8 decimals).

    Fixes over the previous revision: duplicate ``A__`` parameter names
    (a SyntaxError), the nonexistent ``np.loga`` (→ ``np.log2``), locals
    assigned to a throwaway ``snake_case_`` while the algorithm reads
    ``dft``/``root``/``current_root``/``inverce_c``…, the private helpers
    ``__dft``/``__multiply`` that were never actually defined, the swapped
    ``enumerate`` unpack in ``__str__``, and a crash on all-zero input.
    The root of unity now comes from numpy (already imported) instead of
    the extra ``mpmath`` dependency — numerically identical after rounding.

    >>> snake_case__([0, 1, 2], [2, 3, 4]).product == [0, 2, 7, 10, 8]
    True
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Copy the inputs so the callers' lists are never mutated.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove trailing zero coefficients, keeping at least one term so
        # the all-zero polynomial does not empty the list (former crash).
        while len(self.polyA) > 1 and self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while len(self.polyB) > 1 and self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with zeros up to the next power of two large enough to hold
        # the product (deg A + deg B + 1 coefficients).
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A primitive c_max_length-th complex root of unity for the FFT.
        self.root = complex(np.exp(2j * np.pi / self.c_max_length))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Discrete Fourier transform of polyA (``which == "A"``) or polyB."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a constant polynomial is its own transform.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root ** next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise product in the frequency domain followed by the
        inverse FFT; returns the product's coefficient list."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case: length-1 transform needs no inverse butterfly.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack and round away FFT floating-point noise.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing zero coefficients (keep at least one entry).
        while len(inverce_c) > 1 and inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable dump of both factors and their product."""
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
# Run the docstring examples above as doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 666 | import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# NOTE(review): the three assignments below all rebind the same name
# `UpperCAmelCase`, so each overwrites the previous, while later code reads
# `logger` (inside the dataset class) and `MODEL_CONFIG_CLASSES` (next line)
# — names never defined here.  Presumably machine-renamed from
# `logger` / `MODEL_CONFIG_CLASSES` / `MODEL_TYPES`; verify and restore.
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
    '''Arguments controlling SQuAD preprocessing: sequence/question/answer
    length limits, doc stride, caching, SQuAD-v2 support, XLM language id
    and conversion threads (see each field's `help` metadata).

    NOTE(review): every field is declared under the same identifier
    `_SCREAMING_SNAKE_CASE`, so each declaration overwrites the previous one
    and only the last survives on the dataclass; the `help` strings still
    document the intended distinct fields (model_type, data_dir,
    max_seq_length, doc_stride, max_query_length, max_answer_length,
    overwrite_cache, version_2_with_negative, null_score_diff_threshold,
    n_best_size, lang_id, threads) — verify against the original file.
    '''
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "Model type selected in the list: " + ", ".join(_UpperCamelCase )} )
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    _SCREAMING_SNAKE_CASE : int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    _SCREAMING_SNAKE_CASE : int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    _SCREAMING_SNAKE_CASE : float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    _SCREAMING_SNAKE_CASE : int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    _SCREAMING_SNAKE_CASE : int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    _SCREAMING_SNAKE_CASE : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class snake_case__ ( _UpperCamelCase ):
    '''Dataset split names ("train" / "dev").

    NOTE(review): both members are declared under the same identifier
    `_SCREAMING_SNAKE_CASE`, so the second overwrites the first, yet later
    code uses `Split[mode]`, `Split.train` and `Split.dev` — presumably this
    was `class Split(Enum): train = "train"; dev = "dev"`; verify.
    '''
    _SCREAMING_SNAKE_CASE : Tuple = "train"
    _SCREAMING_SNAKE_CASE : Any = "dev"
class snake_case__ ( _UpperCamelCase ):
    '''Torch Dataset of SQuAD features with on-disk caching.

    Converts SQuAD examples to features on first use, caches them next to
    the data (guarded by a FileLock against concurrent/distributed runs),
    and serves per-feature tensor dicts from `__getitem__`.

    NOTE(review): assignments below bind the throwaway name `snake_case_`
    while subsequent lines read `self.args`, `self.processor`, `self.mode`,
    `self.old_features`, `self.features`, `self.dataset`, `self.examples`,
    `cached_features_file`, `start`, `inputs`, … — machine-renamed; would
    raise NameError/AttributeError if executed.  Also the import line above
    brings in `SquadVaProcessor` twice (presumably SquadV1Processor /
    SquadV2Processor).  Verify against the upstream squad.py.
    '''
    _SCREAMING_SNAKE_CASE : SquadDataTrainingArguments
    _SCREAMING_SNAKE_CASE : List[SquadFeatures]
    _SCREAMING_SNAKE_CASE : Split
    _SCREAMING_SNAKE_CASE : bool
    def __init__( self : str , A__ : SquadDataTrainingArguments , A__ : PreTrainedTokenizer , A__ : Optional[int] = None , A__ : Union[str, Split] = Split.train , A__ : Optional[bool] = False , A__ : Optional[str] = None , A__ : Optional[str] = "pt" , ) -> Optional[Any]:
        '''Load features from cache if present (and not overwrite_cache),
        otherwise convert examples to features and write the cache.'''
        snake_case_ : Tuple = args
        snake_case_ : int = is_language_sensitive
        snake_case_ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(A__ , A__ ):
            try:
                snake_case_ : List[str] = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        snake_case_ : Tuple = mode
        # Load data features from cache or dataset file
        snake_case_ : Dict = "v2" if args.version_2_with_negative else "v1"
        snake_case_ : List[Any] = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        snake_case_ : List[Any] = cached_features_file + ".lock"
        with FileLock(A__ ):
            if os.path.exists(A__ ) and not args.overwrite_cache:
                snake_case_ : int = time.time()
                snake_case_ : List[Any] = torch.load(A__ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                snake_case_ : Tuple = self.old_features["features"]
                snake_case_ : List[str] = self.old_features.get("dataset" , A__ )
                snake_case_ : Tuple = self.old_features.get("examples" , A__ )
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run" )
            else:
                if mode == Split.dev:
                    snake_case_ : Tuple = self.processor.get_dev_examples(args.data_dir )
                else:
                    snake_case_ : Tuple = self.processor.get_train_examples(args.data_dir )
                snake_case_ ,snake_case_ : Optional[Any] = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=A__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A__ , )
                snake_case_ : Any = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , A__ , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
    def __len__( self : str ) -> Dict:
        '''Number of cached features.'''
        return len(self.features )
    def __getitem__( self : Optional[int] , A__ : Optional[int] ) -> Dict[str, torch.Tensor]:
        '''Build the model-input tensor dict for feature *i*, adapting the
        keys to the model type (token_type_ids dropped for xlm/roberta/…,
        cls_index/p_mask added for xlnet/xlm, labels added for training).'''
        snake_case_ : Any = self.features[i]
        snake_case_ : Optional[int] = torch.tensor(feature.input_ids , dtype=torch.long )
        snake_case_ : Union[str, Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.cls_index , dtype=torch.long )
        snake_case_ : str = torch.tensor(feature.p_mask , dtype=torch.float )
        snake_case_ : str = torch.tensor(feature.is_impossible , dtype=torch.float )
        snake_case_ : Optional[int] = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible} )
        if self.is_language_sensitive:
            # NOTE(review): `torch.intaa` does not exist — presumably torch.int64; verify.
            inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            snake_case_ : Any = torch.tensor(feature.start_position , dtype=torch.long )
            snake_case_ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
| 666 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class snake_case__ :
    '''Helper that builds small random MobileBERT configs/inputs and runs
    create-and-check routines for every task head.

    NOTE(review): as elsewhere in this file, locals are bound to the
    throwaway `snake_case_` while later code reads `self.batch_size`,
    `input_ids`, `model`, `result`, … — machine-renamed; verify against
    the upstream MobileBertModelTester before executing.
    '''
    def __init__( self : Any , A__ : Optional[int] , A__ : int=13 , A__ : Union[str, Any]=7 , A__ : Any=True , A__ : List[str]=True , A__ : int=True , A__ : Any=True , A__ : int=99 , A__ : List[str]=64 , A__ : Optional[int]=32 , A__ : List[Any]=5 , A__ : Any=4 , A__ : Dict=37 , A__ : Optional[int]="gelu" , A__ : List[Any]=0.1 , A__ : List[Any]=0.1 , A__ : List[str]=5_12 , A__ : Union[str, Any]=16 , A__ : Tuple=2 , A__ : Optional[int]=0.02 , A__ : List[str]=3 , A__ : List[Any]=4 , A__ : Dict=None , ) -> Optional[int]:
        '''Store every model/test hyper-parameter for later use.'''
        snake_case_ : Optional[Any] = parent
        snake_case_ : List[str] = batch_size
        snake_case_ : Union[str, Any] = seq_length
        snake_case_ : List[Any] = is_training
        snake_case_ : int = use_input_mask
        snake_case_ : int = use_token_type_ids
        snake_case_ : Dict = use_labels
        snake_case_ : str = vocab_size
        snake_case_ : str = hidden_size
        snake_case_ : str = embedding_size
        snake_case_ : int = num_hidden_layers
        snake_case_ : Any = num_attention_heads
        snake_case_ : Tuple = intermediate_size
        snake_case_ : Tuple = hidden_act
        snake_case_ : str = hidden_dropout_prob
        snake_case_ : List[Any] = attention_probs_dropout_prob
        snake_case_ : Any = max_position_embeddings
        snake_case_ : Optional[Any] = type_vocab_size
        snake_case_ : List[str] = type_sequence_label_size
        snake_case_ : int = initializer_range
        snake_case_ : str = num_labels
        snake_case_ : List[str] = num_choices
        snake_case_ : Optional[Any] = scope
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
        '''Create random ids/masks/labels plus a config for one test run.'''
        snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : Dict = None
        if self.use_input_mask:
            snake_case_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ : Union[str, Any] = None
        if self.use_token_type_ids:
            snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ : List[str] = None
        snake_case_ : int = None
        snake_case_ : Tuple = None
        if self.use_labels:
            snake_case_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ : List[str] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
        '''Build a small MobileBertConfig from the stored hyper-parameters.'''
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )
    def UpperCAmelCase__ ( self : List[Any] , A__ : List[Any] , A__ : List[str] , A__ : str , A__ : str , A__ : Optional[Any] , A__ : Dict , A__ : Any ) -> Union[str, Any]:
        '''Check base model output shapes (last_hidden_state, pooler).'''
        snake_case_ : str = MobileBertModel(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : List[str] = model(A__ , attention_mask=A__ , token_type_ids=A__ )
        snake_case_ : Optional[int] = model(A__ , token_type_ids=A__ )
        snake_case_ : str = model(A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def UpperCAmelCase__ ( self : List[str] , A__ : Optional[Any] , A__ : int , A__ : Union[str, Any] , A__ : Optional[int] , A__ : List[str] , A__ : Union[str, Any] , A__ : Optional[Any] ) -> List[str]:
        '''Check the masked-LM head's logit shape.'''
        snake_case_ : Optional[int] = MobileBertForMaskedLM(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : Union[str, Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def UpperCAmelCase__ ( self : int , A__ : Any , A__ : Tuple , A__ : Optional[int] , A__ : Optional[int] , A__ : int , A__ : str , A__ : Dict ) -> List[str]:
        '''Check the next-sentence-prediction head's logit shape.'''
        snake_case_ : List[str] = MobileBertForNextSentencePrediction(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : Union[str, Any] = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def UpperCAmelCase__ ( self : str , A__ : List[str] , A__ : Any , A__ : List[str] , A__ : Dict , A__ : Tuple , A__ : List[str] , A__ : Dict ) -> int:
        '''Check pretraining heads (MLM + seq-relationship) logit shapes.'''
        snake_case_ : Optional[int] = MobileBertForPreTraining(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : List[Any] = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , next_sentence_label=A__ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def UpperCAmelCase__ ( self : Optional[int] , A__ : Optional[int] , A__ : Tuple , A__ : str , A__ : Union[str, Any] , A__ : List[Any] , A__ : Optional[Any] , A__ : Union[str, Any] ) -> Tuple:
        '''Check the QA head's start/end logit shapes.'''
        snake_case_ : str = MobileBertForQuestionAnswering(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : Tuple = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def UpperCAmelCase__ ( self : List[Any] , A__ : Union[str, Any] , A__ : int , A__ : Dict , A__ : List[Any] , A__ : List[Any] , A__ : int , A__ : Optional[Any] ) -> Optional[Any]:
        '''Check the sequence-classification head's logit shape.'''
        snake_case_ : Optional[int] = self.num_labels
        snake_case_ : List[Any] = MobileBertForSequenceClassification(A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : Union[str, Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def UpperCAmelCase__ ( self : Optional[Any] , A__ : List[str] , A__ : Optional[Any] , A__ : str , A__ : str , A__ : int , A__ : str , A__ : int ) -> Optional[int]:
        '''Check the token-classification head's logit shape.'''
        snake_case_ : List[str] = self.num_labels
        snake_case_ : Tuple = MobileBertForTokenClassification(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : Dict = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def UpperCAmelCase__ ( self : Optional[int] , A__ : Dict , A__ : Optional[Any] , A__ : Union[str, Any] , A__ : Dict , A__ : Any , A__ : Optional[int] , A__ : Optional[int] ) -> Optional[int]:
        '''Check the multiple-choice head's logit shape (inputs expanded
        to (batch, num_choices, seq)).'''
        snake_case_ : str = self.num_choices
        snake_case_ : Tuple = MobileBertForMultipleChoice(config=A__ )
        model.to(A__ )
        model.eval()
        snake_case_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        snake_case_ : Union[str, Any] = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def UpperCAmelCase__ ( self : int ) -> Any:
        '''Repackage prepare_config_and_inputs() as (config, inputs_dict).'''
        snake_case_ : int = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,(
                snake_case_
            ) ,
        ) : Tuple = config_and_inputs
        snake_case_ : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class snake_case__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    '''Model/pipeline test suite for MobileBERT: wires the tester above to
    the common ModelTesterMixin machinery and exercises every task head.

    NOTE(review): the two class attributes below share the identifier
    `_SCREAMING_SNAKE_CASE` (upstream: `all_model_classes` and
    `pipeline_model_mapping`), so the second overwrites the first; and
    test-method locals are bound to `snake_case_` while `model_class`,
    `return_labels`, `inputs_dict` are read — machine-renamed; verify.
    '''
    _SCREAMING_SNAKE_CASE : Union[str, Any] = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _SCREAMING_SNAKE_CASE : List[Any] = True
    def UpperCAmelCase__ ( self : Any , A__ : List[Any] , A__ : Tuple , A__ : List[Any]=False ) -> List[str]:
        '''Add zero label tensors for pretraining-style model classes when
        the common machinery asks for labelled inputs.'''
        snake_case_ : List[Any] = super()._prepare_for_class(A__ , A__ , return_labels=A__ )
        if return_labels:
            if model_class in get_values(A__ ):
                snake_case_ : str = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A__ )
                snake_case_ : Optional[Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A__ )
        return inputs_dict
    def UpperCAmelCase__ ( self : Any ) -> List[str]:
        '''Set up the model tester and the config tester.'''
        snake_case_ : Dict = MobileBertModelTester(self )
        snake_case_ : List[Any] = ConfigTester(self , config_class=A__ , hidden_size=37 )
    def UpperCAmelCase__ ( self : str ) -> str:
        '''Run the common config round-trip tests.'''
        self.config_tester.run_common_tests()
    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        '''Base-model shape test.'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*A__ )
    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        '''Masked-LM head test.'''
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*A__ )
    def UpperCAmelCase__ ( self : Optional[int] ) -> str:
        '''Multiple-choice head test.'''
        snake_case_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*A__ )
    def UpperCAmelCase__ ( self : Tuple ) -> Any:
        '''Next-sentence-prediction head test.'''
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*A__ )
    def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
        '''Pretraining heads test.'''
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*A__ )
    def UpperCAmelCase__ ( self : Dict ) -> Any:
        '''Question-answering head test.'''
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*A__ )
    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        '''Sequence-classification head test.'''
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*A__ )
    def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
        '''Token-classification head test.'''
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*A__ )
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , device=None ):
    """Build a ``torch.long`` tensor from a (nested) list of token ids.

    Bug fix: the previous revision passed the data argument a second time
    as ``device=``, which raises a TypeError at runtime (and the ``int``
    annotation was wrong for a list of ids).  ``device`` is now an
    optional, backward-compatible keyword (default: CPU).
    """
    return torch.tensor(lowerCAmelCase_ , dtype=torch.long , device=device )
# Relative tolerance for the integration check below.
# NOTE(review): the check reads `TOLERANCE`, which is never defined — this
# assignment was presumably named TOLERANCE before renaming; verify.
UpperCAmelCase = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
    '''Slow integration test: run google/mobilebert-uncased on a fixed
    sentence and ratio-compare a 3x3 output slice to reference values.

    NOTE(review): locals are bound to `snake_case_` while later lines read
    `model`, `output`, `expected_slice`, `lower_bound`, `upper_bound`, and
    the undefined `_long_tensor` / `TOLERANCE` — machine-renamed; verify
    against the upstream test before executing.
    '''
    @slow
    def UpperCAmelCase__ ( self : Optional[int] ) -> str:
        '''Check a slice of the base model's hidden states by ratio bounds.'''
        snake_case_ : Optional[Any] = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(A__ )
        snake_case_ : int = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
        with torch.no_grad():
            snake_case_ : List[Any] = model(A__ )[0]
        snake_case_ : Optional[int] = torch.Size((1, 9, 5_12) )
        self.assertEqual(output.shape , A__ )
        snake_case_ : Optional[Any] = torch.tensor(
            [
                [
                    [-2.4_736_526E07, 8.2_691_656E04, 1.6_521_838E05],
                    [-5.7_541_704E-01, 3.9_056_022E00, 4.4_011_507E00],
                    [2.6_047_359E00, 1.5_677_652E00, -1.7_324_188E-01],
                ]
            ] , device=A__ , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        snake_case_ : str = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        snake_case_ : int = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
| 666 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments rebind the same name `UpperCAmelCase`, so
# the checkpoint map overwrites the logger, while the config classes below
# call `logger.info`/`logger.warning` — presumably `logger` and
# `GIT_PRETRAINED_CONFIG_ARCHIVE_MAP` before renaming; verify.
UpperCAmelCase = logging.get_logger(__name__)
# Map of pretrained GIT checkpoints to their hosted config files.
UpperCAmelCase = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration of the GIT vision encoder (a CLIP-style ViT tower).

    Fixes over the previous revision (machine-renamed code):
    * the duplicate ``A__`` parameter names (a SyntaxError) are replaced by
      the canonical hyper-parameter names, mapped one-to-one — same order,
      same defaults — onto the attributes the body already assigned;
    * hyper-parameters are stored on ``self`` instead of being discarded
      into a throwaway local, so base-class serialization can see them;
    * the classmethod unpacks ``get_config_dict`` into the
      ``config_dict, kwargs`` pair its body reads.
    """

    _SCREAMING_SNAKE_CASE : Dict = "git_vision_model"

    def __init__( self , hidden_size=7_68 , intermediate_size=30_72 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=2_24 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs ) -> None:
        super().__init__(**kwargs )
        # Store every hyper-parameter on the instance; the PretrainedConfig
        # base serializes from self.__dict__.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def UpperCAmelCase__ ( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        """Load this vision config from a checkpoint, unwrapping the nested
        ``vision_config`` when the checkpoint holds a full GIT config."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict , **kwargs )
class snake_case__ ( _UpperCamelCase ):
    """Configuration of a full GIT model (text decoder plus vision tower).

    Fixes over the previous revision (machine-renamed code):
    * duplicate ``A__`` parameter names (a SyntaxError) replaced with the
      canonical names matching the original defaults and assignment order;
    * hyper-parameters stored on ``self`` rather than discarded into a
      throwaway local (``self.vision_config`` was read but never set);
    * the dict-export method now defines the ``output`` mapping it
      previously read without defining.
    """

    _SCREAMING_SNAKE_CASE : Optional[Any] = "git"

    def __init__( self , vision_config=None , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_24 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=1_01 , eos_token_id=1_02 , num_image_with_embedding=None , **kwargs ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        # NOTE(review): `GitVisionConfig` must resolve to the vision-config
        # class defined above (currently also machine-renamed) — verify.
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def UpperCAmelCase__ ( self ) -> dict:
        """Serialize this config (and its nested vision config) to a dict."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 666 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both assignments rebind the same name `UpperCAmelCase`, so
# the checkpoint map overwrites the logger — presumably `logger` and the
# MOBILENET_V2 pretrained-config archive map before renaming; verify.
UpperCAmelCase = logging.get_logger(__name__)
# Map of pretrained MobileNetV2 checkpoints to their hosted config files.
UpperCAmelCase = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration for MobileNetV2 (channels, resolution, depth
    multiplier, expansion ratio, padding/activation choices, etc.).

    Fixes over the previous revision (machine-renamed code): the duplicate
    ``A__`` parameter names (a SyntaxError) are replaced by the canonical
    hyper-parameter names — same order, same defaults — and every value is
    stored on ``self`` instead of being discarded into a throwaway local,
    so base-class serialization can see it.

    Raises:
        ValueError: if ``depth_multiplier`` is not strictly positive.
    """

    _SCREAMING_SNAKE_CASE : Tuple = "mobilenet_v2"

    def __init__( self , num_channels=3 , image_size=2_24 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=2_55 , **kwargs ) -> None:
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class snake_case__ ( _UpperCamelCase ):
    """ONNX-export configuration for the model defined above.

    NOTE(review): attribute/property names restored from the upstream
    transformers ONNX config; previously all three properties shared a single
    mangled name, so only the last definition was reachable.
    """

    # Minimum torch version required for this ONNX export.
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Model input names mapped to their dynamic axes."""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Model output names mapped to their dynamic axes, depending on task."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro–Winkler similarity of two strings (0.0 .. 1.0).

    The Jaro score averages the fraction of matched characters in each string
    with the fraction of matches that are not transposed; the Winkler boost
    then rewards a common prefix of up to 4 characters.

    NOTE(review): the previous revision had duplicate parameter names (a
    SyntaxError) and mangled locals; identifiers restored from the surviving
    references (``limit``, ``left``, ``right``, ``matched``, ``match_count``,
    ``transpositions``, ``prefix_len``, ``jaro``).

    >>> round(jaro_winkler("hello", "world"), 10)
    0.4666666667
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Collect chars of _str1 that appear within the Jaro match window of
        # _str2; each match in _str2 is blanked out so it matches only once.
        matched = []
        limit = min(len(_str1 ) , len(_str2 ) ) // 2
        for i, char in enumerate(_str1 ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_str2 ) ) )
            if char in _str2[left:right]:
                matched.append(char )
                _str2 = f"{_str2[0:_str2.index(char )]} {_str2[_str2.index(char ) + 1:]}"
        return "".join(matched )

    # matching characters
    matching_1 = get_matched_characters(str1 , str2 )
    matching_2 = get_matched_characters(str2 , str1 )
    match_count = len(matching_1 )

    # transposition: matched chars that appear in a different order
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1 , matching_2 ) if c1 != c2] ) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1 )
                + match_count / len(str2 )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4] , str2[:4] ):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


# Backward-compatible alias for the mangled name this function carried before.
SCREAMING_SNAKE_CASE_ = jaro_winkler
if __name__ == "__main__":
    # Run the module doctests, then print one example similarity score.
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make torch/diffusers ops deterministic so the exact slice comparisons below hold.
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
    """Fast checks for AudioDiffusionPipeline built from small dummy models.

    NOTE(review): identifiers in this class look machine-mangled. Every
    method is named ``UpperCAmelCase__`` (each definition shadows the
    previous one), results are bound to ``snake_case_`` but read back under
    other names (``model``, ``pipe``, ``output`` ...), and several call
    arguments are the undefined name ``A__``. The code is kept byte-identical
    here pending recovery of the real identifiers — it cannot run as-is.
    """

    def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
        """Release memory between tests (calls the parent tearDown)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def UpperCAmelCase__ ( self : int ) -> Any:
        """Build a small seeded UNet2DModel; presumably ``dummy_unet`` (referenced via self below)."""
        torch.manual_seed(0 )
        snake_case_ : str = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return model

    @property
    def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
        """Build a small seeded conditional UNet; presumably ``dummy_unet_condition``."""
        torch.manual_seed(0 )
        snake_case_ : Any = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
        return model

    @property
    def UpperCAmelCase__ ( self : Tuple ) -> str:
        """Build a seeded (VAE, UNet) pair; presumably ``dummy_vqvae_and_unet``."""
        torch.manual_seed(0 )
        snake_case_ : int = AutoencoderKL(
            sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
        snake_case_ : Union[str, Any] = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return vqvae, unet

    @slow
    def UpperCAmelCase__ ( self : Optional[Any] ) -> Any:
        """End-to-end pipeline test: DDPM, DDIM-from-raw-audio, and encoded variants."""
        # --- DDPM pipeline built from the dummy UNet; compare audio shape,
        # --- image size and exact pixel slices of the generated spectrogram.
        snake_case_ : Tuple = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : int = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        snake_case_ : Optional[int] = DDPMScheduler()
        snake_case_ : List[Any] = AudioDiffusionPipeline(vqvae=A__ , unet=self.dummy_unet , mel=A__ , scheduler=A__ )
        snake_case_ : int = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        snake_case_ : List[str] = torch.Generator(device=A__ ).manual_seed(42 )
        snake_case_ : List[Any] = pipe(generator=A__ , steps=4 )
        snake_case_ : List[Any] = output.audios[0]
        snake_case_ : Dict = output.images[0]
        # Re-run with the same seed but tuple return; outputs must agree.
        snake_case_ : Dict = torch.Generator(device=A__ ).manual_seed(42 )
        snake_case_ : Union[str, Any] = pipe(generator=A__ , steps=4 , return_dict=A__ )
        snake_case_ : str = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        snake_case_ : Optional[int] = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        snake_case_ : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
        snake_case_ : Union[str, Any] = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        # --- DDIM pipeline with VAE + UNet, starting from raw audio input.
        snake_case_ : List[str] = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        snake_case_ : Optional[int] = DDIMScheduler()
        snake_case_ : Union[str, Any] = self.dummy_vqvae_and_unet
        snake_case_ : Optional[Any] = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=A__ , scheduler=A__ )
        snake_case_ : List[Any] = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        np.random.seed(0 )
        snake_case_ : List[str] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        snake_case_ : str = torch.Generator(device=A__ ).manual_seed(42 )
        snake_case_ : str = pipe(raw_audio=A__ , generator=A__ , start_step=5 , steps=10 )
        snake_case_ : int = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        snake_case_ : str = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        snake_case_ : List[str] = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        # --- Conditional UNet driven by a random encoding tensor.
        snake_case_ : List[str] = self.dummy_unet_condition
        snake_case_ : Union[str, Any] = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=A__ , mel=A__ , scheduler=A__ )
        snake_case_ : Union[str, Any] = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        np.random.seed(0 )
        snake_case_ : Optional[Any] = torch.rand((1, 1, 10) )
        snake_case_ : List[Any] = pipe(generator=A__ , encoding=A__ )
        snake_case_ : Optional[int] = output.images[0]
        snake_case_ : Dict = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        snake_case_ : List[str] = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
    """GPU integration test against the pretrained teticio/audio-diffusion-ddim-256 pipeline.

    NOTE(review): same machine-mangled identifiers as the fast-test class
    above (``snake_case_`` locals read back as ``pipe``/``output``/...,
    undefined ``A__`` arguments). Kept byte-identical pending recovery.
    """

    def UpperCAmelCase__ ( self : Any ) -> List[Any]:
        """Release memory between tests (calls the parent tearDown)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        """Generate once with a fixed seed and compare an exact pixel slice."""
        snake_case_ : Optional[int] = torch_device
        snake_case_ : Tuple = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        snake_case_ : Dict = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        snake_case_ : List[Any] = torch.Generator(device=A__ ).manual_seed(42 )
        snake_case_ : List[str] = pipe(generator=A__ )
        snake_case_ : int = output.audios[0]
        snake_case_ : Tuple = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        snake_case_ : Optional[int] = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        snake_case_ : Tuple = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 666 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

# NOTE(review): constant names restored from their use sites below
# (``logger``, ``new_layer_name_dict``, ``REMOTE_MODEL_PATHS``,
# ``default_cache_dir``, ``CACHE_DIR``); the previous revision bound every
# constant to the same mangled name ``UpperCAmelCase``.

# Original Bark parameter-name fragments -> HF Bark module names; applied to
# checkpoint keys in ``_load_model`` below.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub coordinates of the original suno/bark checkpoints, keyed by model type
# (optionally suffixed with "_small").
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the checkpoint for ``model_type``.

    ``use_small`` selects the "<model_type>_small" entry of
    ``REMOTE_MODEL_PATHS``. Name and parameters restored from the call site
    (previously the def had duplicate mangled parameter names — a SyntaxError).
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["file_name"] )
def _download(from_hf_hub, file_name):
    """Download ``file_name`` from the ``from_hf_hub`` repo into ``CACHE_DIR``.

    Parameters restored from the call site
    ``_download(model_info["repo_id"], model_info["file_name"])``; the cache
    directory is created on demand.
    """
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load one original Bark sub-checkpoint and return it as an HF model.

    Downloads the checkpoint if missing, renames its parameter keys via
    ``new_layer_name_dict``, validates the key sets, and loads the state dict
    into the matching HF model class for ``model_type``
    ("text" | "coarse" | "fine").

    NOTE(review): local names restored from their use sites (``ModelClass``,
    ``ConfigClass``, ``model_key``, ``state_dict`` ... all appeared unmangled
    where they were read); the previous def had duplicate parameter names.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." )
        _download(model_info["repo_id"] , model_info["file_name"] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head" )
    model_args["hidden_size"] = model_args.pop("n_embd" )
    model_args["num_layers"] = model_args.pop("n_layer" )

    model_config = ConfigClass(**checkpoint["model_args"] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]

    # fixup checkpoint: strip the torch.compile prefix and remap layer names
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )

    # The ".attn.bias" buffers are causal masks, not learned weights.
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(extra_keys ) != 0:
        raise ValueError(f"extra keys found: {extra_keys}" )
    if len(missing_keys ) != 0:
        raise ValueError(f"missing keys: {missing_keys}" )

    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, verify it against the original, and save it.

    Loads both the HF conversion and the original suno/bark model on CPU,
    checks parameter counts and output equality on a random batch, then writes
    the HF model to ``pytorch_dump_folder_path``.

    NOTE(review): function name restored from the ``__main__`` call site; the
    previous def had duplicate mangled parameter names (a SyntaxError).
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , "cpu" , model_type=model_type , use_small=use_small )

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble a full BarkModel from already-converted sub-models and save it.

    Reads the three converted sub-model folders plus the public Encodec codec,
    composes their configs and generation configs, wires the sub-models into a
    ``BarkModel``, and saves (optionally pushing to ``hub_path``).

    NOTE(review): parameter/local names reconstructed — the attribute reads
    ``semantic.generation_config`` / ``coarseAcoustic.generation_config`` /
    ``fineAcoustic.generation_config`` and ``bark.save_pretrained`` survived
    the mangling and anchor the reconstruction; confirm against upstream.
    """
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , "config.json" ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , "config.json" ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , "config.json" ) )
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz" )

    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz" )

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )

    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    # NOTE(review): restored ``parser``/``args`` — the previous revision bound
    # the parser and parse result to a mangled name while the lines below
    # referenced ``parser`` and ``args`` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
# NOTE(review): mangled name — presumably the module ``logger``; it is not
# referenced anywhere in this module's visible code.
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
    """Deprecated feature-extractor shim.

    Emits a ``FutureWarning`` on construction and otherwise behaves exactly
    like its parent image-processor class.
    """

    def __init__(self, *args, **kwargs) -> None:
        # NOTE(review): the previous revision passed the *args tuple itself as
        # the warning category; ``FutureWarning`` restores the intended call.
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." ,
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 666 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for this sub-package: names are resolved on first
# access via _LazyModule instead of at import time.
# NOTE(review): restored ``_import_structure`` and the ``sys.modules``
# replacement — the previous revision bound both to a mangled name while the
# _LazyModule call below referenced ``_import_structure`` (NameError), and
# never installed the lazy module.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# NOTE(review): constant names (``logger``, ``MAPPING``, ``TOP_LEVEL_KEYS``)
# restored from their use sites in the functions below; the previous revision
# bound all of them to the same mangled name.

# fairseq parameter-name fragments -> HF Wav2Vec2Conformer module paths
# ("*" is replaced with the encoder layer index during conversion).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

# Mapped targets that live on the model root rather than under the
# "wav2vec2_conformer." submodule prefix.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the tensor slot addressed by the dotted ``key``.

    Walks ``hf_pointer`` attribute-by-attribute, validates the target shape,
    and writes ``value`` into the slot selected by ``weight_type``
    (``weight``/``bias``/batch-norm stats/``inv_freq``/...; ``None`` writes to
    the pointer's own ``.data``). ``full_name`` is only used in messages.

    NOTE(review): name and parameters restored from the call site in
    ``recursively_load_weights``; the previous def had duplicate mangled
    parameter names (a SyntaxError).
    """
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy all fairseq weights into the HF Wav2Vec2Conformer model.

    Convolutional feature-extractor tensors go through ``load_conv_layer``;
    everything else is matched against ``MAPPING``, given its layer index and
    weight type, and written via ``set_recursively``. Unmatched weights are
    collected and logged.

    NOTE(review): name restored from the ``convert_*`` call site; branch
    indentation reconstructed from the upstream conversion script (the source
    file had lost all indentation) — confirm against upstream.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Targets not in TOP_LEVEL_KEYS live under the submodule prefix.
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor from fairseq to HF.

    ``full_name`` encodes "conv_layers.<layer_id>.<type_id>...": type 0 is the
    conv weight/bias, type 2 the layer-norm weight/bias (only on layer 0 when
    group norm is used). Anything else is appended to ``unused_weights``.

    NOTE(review): name and parameters restored from the call site in
    ``recursively_load_weights``; previous def had duplicate parameter names.
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq Wav2Vec2-Conformer checkpoint to the HF format.

    Builds (or loads) the HF config, optionally writes a CTC vocab/processor
    from the fairseq dictionary when the model is fine-tuned, loads the
    fairseq model, copies its weights and saves the HF model to
    ``pytorch_dump_folder_path``.

    NOTE(review): function name restored from the ``__main__`` call site; the
    previous def had duplicate mangled parameter names (a SyntaxError), and
    branch indentation was reconstructed from the upstream script.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act="swish" )
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )

    model = model[0].eval()

    recursively_load_weights(model , hf_wavavec , not is_finetuned )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # NOTE(review): restored ``parser``/``args`` — the previous revision bound
    # both to a mangled name while the lines below referenced them (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 666 | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : str = ["pixel_values"]
def __init__( self : List[Any] , A__ : bool = True , A__ : Optional[Dict[str, int]] = None , A__ : PILImageResampling = PILImageResampling.BILINEAR , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , **A__ : int , ) -> None:
'''simple docstring'''
super().__init__(**A__ )
snake_case_ : Optional[int] = size if size is not None else {"shortest_edge": 2_56}
snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ )
snake_case_ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
snake_case_ : Any = get_size_dict(A__ , param_name="crop_size" )
snake_case_ : int = do_resize
snake_case_ : Optional[Any] = size
snake_case_ : Optional[Any] = resample
snake_case_ : Optional[int] = do_center_crop
snake_case_ : List[Any] = crop_size
snake_case_ : List[Any] = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : Optional[Any] = do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : str , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
snake_case_ : Any = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : int , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Tuple = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : List[str] , A__ : np.ndarray , A__ : float , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Union[str, Any] , A__ : ImageInput , A__ : Optional[bool] = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : Dict[str, int] = None , A__ : Optional[bool] = None , A__ : Optional[float] = None , A__ : Optional[bool] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A__ : Union[str, Any] , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case_ : Dict = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(A__ , default_to_square=A__ )
snake_case_ : Tuple = resample if resample is not None else self.resample
snake_case_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = crop_size if crop_size is not None else self.crop_size
snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" )
snake_case_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : Any = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
snake_case_ : List[str] = image_std if image_std is not None else self.image_std
snake_case_ : Dict = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
snake_case_ : Tuple = [to_numpy_array(A__ ) for image in images]
if do_resize:
snake_case_ : Any = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_center_crop:
snake_case_ : List[str] = [self.center_crop(image=A__ , size=A__ ) for image in images]
if do_rescale:
snake_case_ : Any = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
snake_case_ : Union[str, Any] = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
snake_case_ : Optional[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
snake_case_ : Any = {"pixel_values": images}
return BatchFeature(data=A__ , tensor_type=A__ )
def UpperCAmelCase__ ( self : List[str] , A__ : Dict , A__ : List[Tuple] = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A__ ) != len(A__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(A__ ):
snake_case_ : Dict = target_sizes.numpy()
snake_case_ : int = []
for idx in range(len(A__ ) ):
snake_case_ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A__ )
snake_case_ : int = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A__ )
else:
snake_case_ : List[Any] = logits.argmax(dim=1 )
snake_case_ : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 666 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer
def __init__( self : Optional[int] , A__ : List[Any]=None , A__ : Optional[int]=None , A__ : List[str]=None , A__ : Dict="replace" , A__ : List[str]="<s>" , A__ : Optional[Any]="</s>" , A__ : List[str]="</s>" , A__ : List[Any]="<s>" , A__ : int="<unk>" , A__ : int="<pad>" , A__ : List[Any]="<mask>" , A__ : Any=False , A__ : Optional[int]=True , **A__ : Union[str, Any] , ) -> int:
'''simple docstring'''
super().__init__(
A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : List[Any] = getattr(A__ , pre_tok_state.pop("type" ) )
snake_case_ : Any = add_prefix_space
snake_case_ : List[Any] = pre_tok_class(**A__ )
snake_case_ : Optional[int] = add_prefix_space
snake_case_ : List[str] = "post_processor"
snake_case_ : Tuple = getattr(self.backend_tokenizer , A__ , A__ )
if tokenizer_component_instance:
snake_case_ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : str = tuple(state["sep"] )
if "cls" in state:
snake_case_ : Tuple = tuple(state["cls"] )
snake_case_ : Tuple = False
if state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : Optional[Any] = add_prefix_space
snake_case_ : str = True
if state.get("trim_offsets" , A__ ) != trim_offsets:
snake_case_ : Optional[int] = trim_offsets
snake_case_ : List[Any] = True
if changes_to_apply:
snake_case_ : int = getattr(A__ , state.pop("type" ) )
snake_case_ : List[Any] = component_class(**A__ )
setattr(self.backend_tokenizer , A__ , A__ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Tuple , A__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
snake_case_ : Any = value
def UpperCAmelCase__ ( self : int , *A__ : Optional[Any] , **A__ : int ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[Any] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Union[str, Any] , *A__ : Any , **A__ : List[Any] ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[int] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def UpperCAmelCase__ ( self : int , A__ : List[str] , A__ : Union[str, Any]=None ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 666 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
UpperCAmelCase = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
UpperCAmelCase = "▁"
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Tuple = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , A__ : Optional[Any] , A__ : str="<s>" , A__ : int="</s>" , A__ : List[str]="</s>" , A__ : List[str]="<s>" , A__ : Union[str, Any]="<unk>" , A__ : Dict="<pad>" , A__ : Union[str, Any]="<mask>" , A__ : Optional[Dict[str, Any]] = None , **A__ : Optional[int] , ) -> None:
'''simple docstring'''
snake_case_ : Dict = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else mask_token
snake_case_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A__ , eos_token=A__ , unk_token=A__ , sep_token=A__ , cls_token=A__ , pad_token=A__ , mask_token=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
snake_case_ : Union[str, Any] = vocab_file
snake_case_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A__ ) )
snake_case_ : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case_ : List[str] = len(self.sp_model ) - 1
snake_case_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase__ ( self : Any , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ : Any = [self.cls_token_id]
snake_case_ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Optional[Any] , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
if token_ids_a is None:
return [1] + ([0] * len(A__ )) + [1]
return [1] + ([0] * len(A__ )) + [1, 1] + ([0] * len(A__ )) + [1]
def UpperCAmelCase__ ( self : List[Any] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = [self.sep_token_id]
snake_case_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case_ : List[str] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : Optional[Any] , A__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(A__ , out_type=A__ )
def UpperCAmelCase__ ( self : List[Any] , A__ : Any ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ : Optional[Any] = self.sp_model.PieceToId(A__ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self : Dict , A__ : Dict ) -> Optional[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(A__ )
def UpperCAmelCase__ ( self : Any , A__ : List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : int = []
snake_case_ : int = ""
snake_case_ : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A__ ) + token
snake_case_ : int = True
snake_case_ : Union[str, Any] = []
else:
current_sub_tokens.append(A__ )
snake_case_ : Dict = False
out_string += self.sp_model.decode(A__ )
return out_string.strip()
def __getstate__( self : Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = self.__dict__.copy()
snake_case_ : str = None
return state
def __setstate__( self : Union[str, Any] , A__ : Any ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ : Optional[int] = {}
snake_case_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Optional[Any] , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A__ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case_ : str = os.path.join(
A__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A__ )
elif not os.path.isfile(self.vocab_file ):
with open(A__ , "wb" ) as fi:
snake_case_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (out_vocab_file,)
| 666 | from ...configuration_utils import PretrainedConfig
UpperCAmelCase = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = "tapas"
def __init__( self : List[Any] , A__ : str=3_05_22 , A__ : Tuple=7_68 , A__ : List[Any]=12 , A__ : Optional[Any]=12 , A__ : Union[str, Any]=30_72 , A__ : Dict="gelu" , A__ : List[Any]=0.1 , A__ : str=0.1 , A__ : List[Any]=10_24 , A__ : Optional[int]=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , A__ : Union[str, Any]=0.02 , A__ : Tuple=1E-12 , A__ : Tuple=0 , A__ : Any=10.0 , A__ : List[str]=0 , A__ : List[str]=1.0 , A__ : Optional[Any]=None , A__ : Tuple=1.0 , A__ : Union[str, Any]=False , A__ : Any=None , A__ : Union[str, Any]=1.0 , A__ : int=1.0 , A__ : str=False , A__ : int=False , A__ : Optional[Any]="ratio" , A__ : str=None , A__ : int=None , A__ : Dict=64 , A__ : int=32 , A__ : Optional[Any]=False , A__ : List[str]=True , A__ : List[Any]=False , A__ : str=False , A__ : Any=True , A__ : Tuple=False , A__ : str=None , A__ : str=None , **A__ : List[str] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=A__ , **A__ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
snake_case_ : int = vocab_size
snake_case_ : int = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Optional[int] = hidden_act
snake_case_ : Optional[int] = intermediate_size
snake_case_ : str = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : Any = max_position_embeddings
snake_case_ : List[Any] = type_vocab_sizes
snake_case_ : str = initializer_range
snake_case_ : Optional[Any] = layer_norm_eps
# Fine-tuning task hyperparameters
snake_case_ : Optional[int] = positive_label_weight
snake_case_ : Dict = num_aggregation_labels
snake_case_ : List[str] = aggregation_loss_weight
snake_case_ : str = use_answer_as_supervision
snake_case_ : int = answer_loss_importance
snake_case_ : Any = use_normalized_answer_loss
snake_case_ : int = huber_loss_delta
snake_case_ : List[Any] = temperature
snake_case_ : str = aggregation_temperature
snake_case_ : List[str] = use_gumbel_for_cells
snake_case_ : List[str] = use_gumbel_for_aggregation
snake_case_ : Dict = average_approximation_function
snake_case_ : List[str] = cell_selection_preference
snake_case_ : Dict = answer_loss_cutoff
snake_case_ : List[str] = max_num_rows
snake_case_ : Union[str, Any] = max_num_columns
snake_case_ : str = average_logits_per_cell
snake_case_ : Union[str, Any] = select_one_column
snake_case_ : Dict = allow_empty_column_selection
snake_case_ : List[Any] = init_cell_selection_weights_to_zero
snake_case_ : str = reset_position_index_per_cell
snake_case_ : List[Any] = disable_per_token_loss
# Aggregation hyperparameters
snake_case_ : List[str] = aggregation_labels
snake_case_ : Union[str, Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels , A__ ):
snake_case_ : Optional[int] = {int(A__ ): v for k, v in aggregation_labels.items()}
| 666 | 1 |
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE_ ( side_length: float ):
    """Return the surface area (6*a^2) of a cube with edge length ``side_length``.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    # Fix: the parameter had been renamed away from the name the body uses,
    # so every call raised NameError; restore the expected name.
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def SCREAMING_SNAKE_CASE_ ( length: float, breadth: float, height: float ):
    """Return the surface area 2*(lb + bh + lh) of a cuboid.

    Raises:
        ValueError: if any dimension is negative.
    """
    # Fix: the three parameters all carried the same obfuscated name (a
    # SyntaxError) while the body reads length/breadth/height; restore them.
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE_ ( radius: float ):
    """Return the surface area 4*pi*r^2 of a sphere.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    # Fix: parameter renamed back to ``radius``, the name the body reads.
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE_ ( radius: float ):
    """Return the total surface area 3*pi*r^2 of a solid hemisphere
    (curved surface 2*pi*r^2 plus the flat disc pi*r^2).

    Raises:
        ValueError: if ``radius`` is negative.
    """
    # Fix: parameter renamed back to ``radius``, the name the body reads.
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE_ ( radius: float, height: float ):
    """Return the total surface area pi*r*(r + slant) of a right cone,
    where slant = sqrt(h^2 + r^2).

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    # Fix: both parameters carried the same obfuscated name (SyntaxError);
    # restore radius/height, the names the body reads.
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE_ ( radius_1: float, radius_2: float, height: float ):
    """Return the total surface area of a conical frustum:
    pi*(slant*(r1 + r2) + r1^2 + r2^2), slant = sqrt(h^2 + (r1 - r2)^2).

    Raises:
        ValueError: if any argument is negative.
    """
    # Fix: the two radii had been collapsed onto one obfuscated parameter
    # name (SyntaxError + a degenerate (r - r) term); restore distinct names.
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values" )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def SCREAMING_SNAKE_CASE_ ( radius: float, height: float ):
    """Return the total surface area 2*pi*r*(h + r) of a closed cylinder.

    Raises:
        ValueError: if ``radius`` or ``height`` is negative.
    """
    # Fix: duplicate obfuscated parameter names (SyntaxError); restore the
    # radius/height names the body reads.
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE_ ( torus_radius: float, tube_radius: float ):
    """Return the surface area 4*pi^2*R*r of a ring torus
    (R = distance from torus centre to tube centre, r = tube radius).

    Raises:
        ValueError: if either radius is negative, or if R < r
            (a spindle/self-intersecting torus, which this formula
            does not cover).
    """
    # Fixes: parameters shared one obfuscated name (SyntaxError), and the
    # ``pi`` in 4*pi**2 had been mangled into a parameter reference.
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori" )
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE_ ( length: float, width: float ):
    """Return the area length*width of a rectangle.

    Raises:
        ValueError: if ``length`` or ``width`` is negative.
    """
    # Fix: duplicate obfuscated parameter names (SyntaxError); restore
    # length/width, the names the body reads.
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def SCREAMING_SNAKE_CASE_ ( side_length: float ):
    """Return the area a^2 of a square with edge ``side_length``.

    Raises:
        ValueError: if ``side_length`` is negative.
    """
    # Fix: parameter renamed back to ``side_length``, the name the body reads.
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2
def SCREAMING_SNAKE_CASE_ ( base: float, height: float ):
    """Return the area base*height/2 of a triangle.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    # Fix: duplicate obfuscated parameter names (SyntaxError); restore
    # base/height, the names the body reads.
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2
def SCREAMING_SNAKE_CASE_ ( side_1: float, side_2: float, side_3: float ):
    """Return the area of a triangle from its three side lengths
    using Heron's formula: sqrt(s*(s-a)*(s-b)*(s-c)), s the semi-perimeter.

    Raises:
        ValueError: if any side is negative, or if the sides violate the
            triangle inequality.
    """
    # Fix: all three sides shared one obfuscated parameter name
    # (SyntaxError) and the inequality checks collapsed onto it; restore
    # three distinct side names.
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def SCREAMING_SNAKE_CASE_ ( base: float, height: float ):
    """Return the area base*height of a parallelogram.

    Raises:
        ValueError: if ``base`` or ``height`` is negative.
    """
    # Fix: duplicate obfuscated parameter names (SyntaxError); restore
    # base/height, the names the body reads.
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height
def SCREAMING_SNAKE_CASE_ ( base_1: float, base_2: float, height: float ):
    """Return the area (b1 + b2)*h/2 of a trapezium.

    Raises:
        ValueError: if any argument is negative.
    """
    # Fix: both bases shared one obfuscated parameter name (SyntaxError);
    # restore two distinct base names plus height.
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height
def SCREAMING_SNAKE_CASE_ ( radius: float ):
    """Return the area pi*r^2 of a circle.

    Raises:
        ValueError: if ``radius`` is negative.
    """
    # Fix: parameter renamed back to ``radius``, the name the body reads.
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2
def SCREAMING_SNAKE_CASE_ ( radius_x: float, radius_y: float ):
    """Return the area pi*a*b of an ellipse with semi-axes a, b.

    Raises:
        ValueError: if either semi-axis is negative.
    """
    # Fix: duplicate obfuscated parameter names (SyntaxError); restore
    # radius_x/radius_y, the names the body reads.
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE_ ( diagonal_1: float, diagonal_2: float ):
    """Return the area d1*d2/2 of a rhombus given its diagonals.

    Raises:
        ValueError: if either diagonal is negative.
    """
    # Fix: both diagonals shared one obfuscated parameter name
    # (SyntaxError); restore two distinct names.
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2
def SCREAMING_SNAKE_CASE_ ( sides: int, length: float ):
    """Return the area of a regular polygon with ``sides`` sides of the
    given edge ``length``: (n * a^2) / (4 * tan(pi / n)).

    Raises:
        ValueError: if ``sides`` is not an integer >= 3, or ``length``
            is negative.
    """
    # Fixes relative to the scrambled original:
    #  * parameters shared one obfuscated name (SyntaxError) — restored;
    #  * the isinstance check had both arguments collapsed — it validates
    #    that ``sides`` is an int;
    #  * a duplicated, unreachable second ``return`` removed.
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
    # Run the module's doctests, then print a demo of every helper.
    # NOTE(review): the names called below (area_rectangle, surface_area_cube,
    # ...) are the original, un-obfuscated function names; in this dump the
    # definitions above are all named SCREAMING_SNAKE_CASE_, so this demo only
    # runs against the original file — TODO confirm.
    import doctest
    doctest.testmod(verbose=True) # verbose so we can see methods missing tests
    print("[DEMO] Areas of various geometric shapes: \n")
    print(F"Rectangle: {area_rectangle(1_0, 2_0) = }")
    print(F"Square: {area_square(1_0) = }")
    print(F"Triangle: {area_triangle(1_0, 1_0) = }")
    print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
    print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
    print(F"Rhombus: {area_rhombus(1_0, 2_0) = }")
    print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
    print(F"Circle: {area_circle(2_0) = }")
    print(F"Ellipse: {area_ellipse(1_0, 2_0) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(F"Cube: {surface_area_cube(2_0) = }")
    print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
    print(F"Sphere: {surface_area_sphere(2_0) = }")
    print(F"Hemisphere: {surface_area_hemisphere(2_0) = }")
    print(F"Cone: {surface_area_cone(1_0, 2_0) = }")
    print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
    print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
    print(F"Torus: {surface_area_torus(2_0, 1_0) = }")
    print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
    print(F"Square: {area_reg_polygon(4, 1_0) = }")
    print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
| 666 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class snake_case__ ( datasets.BeamBasedBuilder ):
    """Trivial Beam-based builder used as a test fixture: one "train" split of
    string rows under a flat "content" feature.

    NOTE(review): the obfuscated copy gave all three hooks one shared method
    name (shadowing each other) and duplicate `A__` parameters (a
    SyntaxError); the DatasetBuilder hook names are restored here.
    """

    def _info(self):
        # Flat schema with a single string column; no supervised keys.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        # Feed the in-memory (key, example) pairs into the Beam pipeline.
        return pipeline | "Load Examples" >> beam.Create(examples)


# Backward-compatible alias matching the name the tests below use.
DummyBeamDataset = snake_case__
class snake_case__ ( datasets.BeamBasedBuilder ):
    """Beam-based builder fixture with a nested feature schema:
    {"a": Sequence({"b": string})}.

    NOTE(review): hook names restored — the obfuscated copy shadowed all
    methods under one name and reused duplicate `A__` parameters.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


# Backward-compatible alias matching the name the tests below use.
NestedBeamDataset = snake_case__
def SCREAMING_SNAKE_CASE_():
    """Return three (key, example) pairs with a flat string "content" field."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Stable alias: several defs in this module reuse the obfuscated name
# SCREAMING_SNAKE_CASE_ and shadow each other; the builders and tests call
# this helper as `get_test_dummy_examples`.
get_test_dummy_examples = SCREAMING_SNAKE_CASE_
def SCREAMING_SNAKE_CASE_():
    """Return three (key, example) pairs with a nested {"a": {"b": [str]}} shape."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Stable alias (see note on the sibling helper): referenced by the nested
# builder and the tests as `get_test_nested_examples`.
get_test_nested_examples = SCREAMING_SNAKE_CASE_
class snake_case__ ( _UpperCamelCase ):
    """End-to-end checks for Beam-based dataset builders on the DirectRunner.

    NOTE(review): the obfuscated copy bound every local to `snake_case_` while
    later lines read `builder`/`dset`/`expected_num_examples`, and all four
    test methods shared one name (shadowing each other); real names restored.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # Keep a handle on the real writer so the patched one can delegate to
        # it with a forced shard count.
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            # The obfuscated copy checked shard 00000 twice; check 00001 too.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner the builder must refuse to prepare.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 666 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
UpperCAmelCase = logging.get_logger(__name__)
# Map of pretrained VisualBERT checkpoints to their hosted config files.
# NOTE(review): this assignment REBINDS the same obfuscated name as the logger
# above, clobbering it — upstream these were two distinct names (`logger` and
# the *_PRETRAINED_CONFIG_ARCHIVE_MAP constant); confirm against the original.
UpperCAmelCase = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration class for VisualBERT (joint text + visual embeddings).

    NOTE(review): the obfuscated copy named every __init__ parameter `A__`
    (duplicate arguments are a SyntaxError) and bound the values to a local
    instead of `self`; the upstream parameter names, defaults, and attribute
    assignments are restored here.
    """

    _SCREAMING_SNAKE_CASE : str = "visual_bert"
    # Restored name used by the config registry; kept alongside the
    # obfuscated class attribute above for compatibility.
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        visual_embedding_dim=5_12,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 666 | import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture(name="mock_emitted_deprecation_warnings")
def SCREAMING_SNAKE_CASE_(monkeypatch):
    """Reset the deprecation-warning registry so each test sees a fresh warning."""
    # The obfuscated parameter name broke pytest's `monkeypatch` injection.
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture(name="mock_hfh")
def SCREAMING_SNAKE_CASE_(monkeypatch):
    """Replace the hub client with an in-memory mock listing four metrics."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        # NOTE(review): this hook was obfuscated; `list_metrics` matches what
        # datasets.inspect calls on the hub client — confirm.
        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def SCREAMING_SNAKE_CASE_(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each deprecated metric entry point must emit the evaluate-migration warning.

    NOTE(review): the fixture parameter names must match the fixtures defined
    above (registered as mock_emitted_deprecation_warnings / mock_hfh).
    """
    if "tmp_path" in args:
        # The sentinel string stands in for pytest's tmp_path fixture value.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # The obfuscation dropped the expected warning category (FutureWarning).
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666 | 1 |
import json
import sys
def SCREAMING_SNAKE_CASE_(json_file: str, md_file: str) -> None:
    """Render a benchmark-results JSON file as a collapsible Markdown table.

    Args:
        json_file: path to {benchmark_name: {metric_name: {"new", "old", "diff"}}}.
        md_file: output path for the generated Markdown.
    """
    with open(json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        # One header/separator/value row triple per benchmark table.
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    # CLI: python <script> <benchmark results .json> <output .md>
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    # The converter above is obfuscation-renamed to SCREAMING_SNAKE_CASE_;
    # the original call to `format_json_to_md` no longer resolved.
    SCREAMING_SNAKE_CASE_(input_json_file, output_md_file)
| 666 | from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost insertion index for `item` in sorted_collection[lo:hi].

    A negative `hi` means "to the end of the collection".
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost insertion index for `item` in sorted_collection[lo:hi].

    A negative `hi` means "to the end of the collection".
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # `<=` (vs `<` in bisect_left) places equal items to the right.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, at the leftmost sorted slot."""
    if hi < 0:
        hi = len(sorted_collection)
    # bisect.bisect_left replaces the local helper, whose obfuscated name no
    # longer resolves; negative `hi` meant "to the end" in that helper.
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert `item` into `sorted_collection` in place, at the rightmost sorted slot."""
    if hi < 0:
        hi = len(sorted_collection)
    # bisect.bisect_right replaces the unresolvable local helper.
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; return the index of `item` or None if absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the standard library; return the index of `item` or None."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right+1].

    Returns the index of `item`, or None when the range is exhausted.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        # Recurse by this function's own (obfuscated) name; the original
        # called a `binary_search_by_recursion` that no longer exists.
        return SCREAMING_SNAKE_CASE_(sorted_collection, item, left, midpoint - 1)
    else:
        return SCREAMING_SNAKE_CASE_(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive demo: read a comma-separated list, sort it, and search for a
    # target value.
    # NOTE(review): the obfuscation rebinds every result to `UpperCAmelCase`
    # while later lines read `user_input`, `collection`, `target`, `result`,
    # and call `binary_search` — none of which exist under those names here.
    # Restore the original names before running.
    UpperCAmelCase = input("Enter numbers separated by comma:\n").strip()
    UpperCAmelCase = sorted(int(item) for item in user_input.split(","))
    UpperCAmelCase = int(input("Enter a single number to be found in the list:\n"))
    UpperCAmelCase = binary_search(collection, target)
    if result is None:
        print(F"{target} was not found in {collection}.")
    else:
        print(F"{target} was found at position {result} in {collection}.")
| 666 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_(number_of_bytes: int, partitions: int) -> list[str]:
    """Split `number_of_bytes` into `partitions` contiguous 1-based byte ranges.

    The final partition absorbs any remainder.

    Raises:
        ValueError: if partitions is not positive or exceeds number_of_bytes.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 666 | import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( _UpperCamelCase ):
    """Unconditional latent-diffusion pipeline: a UNet denoiser run under a
    DDIM scheduler, with a VQ-VAE decoding the final latents to images.

    NOTE(review): the obfuscated copy named every __call__ parameter `A__`
    (duplicate arguments are a SyntaxError) and dropped all local bindings;
    the upstream names are restored here.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample `batch_size` images; returns an ImagePipelineOutput (or a tuple)."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 666 | 1 |
def SCREAMING_SNAKE_CASE_(dist, v):
    """Pretty-print a v x v shortest-path matrix; 'INF' marks unreachable pairs."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def SCREAMING_SNAKE_CASE_(graph, v):
    """Floyd-Warshall all-pairs shortest paths on a v x v adjacency matrix.

    Prints the resulting matrix and returns (dist, v).
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    # The printing helper's obfuscated name is shadowed by this def (both
    # share one name), so print the matrix inline.
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
    return dist, v
if __name__ == "__main__":
    # Interactive driver: build a weighted adjacency matrix from stdin and run
    # Floyd-Warshall (the last def above, obfuscation-named SCREAMING_SNAKE_CASE_).
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        # Distance from a vertex to itself is zero.
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    SCREAMING_SNAKE_CASE_(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
| 666 | from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE_(lowerCAmelCase_: int) -> str:
    """Compute pi to `lowerCAmelCase_` significant digits via the Chudnovsky series.

    Raises:
        TypeError: if the precision is not an int.
        ValueError: if the precision is < 1.
    """
    if not isinstance(lowerCAmelCase_, int):
        raise TypeError("Undefined for non-integers")
    elif lowerCAmelCase_ < 1:
        raise ValueError("Undefined for non-natural numbers")
    precision = lowerCAmelCase_
    # Work at the requested decimal precision (the obfuscated copy dropped
    # this assignment, leaving the default context precision in effect).
    getcontext().prec = precision
    num_iterations = ceil(precision / 1_4)  # each Chudnovsky term adds ~14 digits
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last (rounding-contaminated) digit.
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    # Demo: print the first 50 digits of pi (the routine above is
    # obfuscation-named SCREAMING_SNAKE_CASE_; `pi`/`n` never resolved).
    n = 5_0
    print(f"The first {n} digits of pi is: {SCREAMING_SNAKE_CASE_(n)}")
| 666 | 1 |
def SCREAMING_SNAKE_CASE_(lowerCAmelCase_: int) -> int:
    """Return the bit length of a non-negative integer (0 -> 0, 8 -> 4).

    Raises:
        TypeError: if the input is not an int.
        ValueError: if the input is negative.
    """
    if not isinstance(lowerCAmelCase_, int):
        raise TypeError("Input value must be an 'int' type")
    if lowerCAmelCase_ < 0:
        # A negative operand would loop forever (x >> 1 stays -1); reject it.
        raise ValueError("Input value must be a non-negative 'int'")
    number = lowerCAmelCase_
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
def SCREAMING_SNAKE_CASE_(lowerCAmelCase_: int = 1_0_0_0) -> int:
    """Project Euler 25: index of the first Fibonacci number with
    `lowerCAmelCase_` decimal digits (F1 = F2 = 1).
    """
    fa, fb = 1, 1
    index = 2
    while True:
        digit_count = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        # Count the digits of the NEW Fibonacci number f (the obfuscated copy
        # iterated str(n) — the target digit count — instead of str(f)).
        for _ in str(f):
            digit_count += 1
        if digit_count == lowerCAmelCase_:
            break
    return index
if __name__ == "__main__":
    # Read the target digit count from stdin and report the Fibonacci index
    # (the solver above is obfuscation-named SCREAMING_SNAKE_CASE_).
    print(SCREAMING_SNAKE_CASE_(int(str(input()).strip())))
| 666 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """Fast tests for KandinskyVaaPriorPipeline built from tiny dummy
    CLIP/prior components.

    NOTE(review): the obfuscated copy bound every class attribute to one name
    and every method/property to another, so later definitions shadowed
    earlier ones; the names the PipelineTesterMixin reads (pipeline_class,
    params, dummy_prior, get_dummy_components, ...) are restored here.
    """

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=10_00,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=2_24,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=2_24,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073],
            image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711],
            resample=3,
            size=2_24,
        )
        return image_processor

    def get_dummy_components(self):
        """Assemble all dummy modules into the kwargs the pipeline expects."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=10_00,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 666 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right+1] (indices may be negative).

    Raises:
        ValueError: if nums is empty.
        IndexError: if left or right falls outside the list.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = SCREAMING_SNAKE_CASE_(nums, left, mid)  # find max in range[left, mid]
    right_max = SCREAMING_SNAKE_CASE_(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
    # Run the module's doctests verbosely when executed as a script.
    import doctest
    doctest.testmod(verbose=True)
| 666 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Browser-like User-Agent header so Google serves the full image-search page.
# (Bound to the obfuscated name `UpperCAmelCase`; the download routine below
# passes it with the search request.)
UpperCAmelCase = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def SCREAMING_SNAKE_CASE_(query: str = "dhaka", max_images: int = 5) -> int:
    """Scrape Google Images for `query` and download up to `max_images` files
    into a `query_<term>` directory; returns the number downloaded.

    NOTE(review): relies on module-level `requests`, `BeautifulSoup` (imported
    from the obfuscation-mangled module name "bsa" — presumably "bs4"), and
    the `UpperCAmelCase` headers dict; confirm those imports upstream.
    """
    max_images = min(max_images, 5_0)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=UpperCAmelCase)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    # Round-trip through JSON to unescape the embedded payload.
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    # Strip the low-resolution thumbnail entries before extracting URLs.
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # Two unicode-escape passes undo the double-escaped URL encoding.
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
    try:
        # The downloader above is obfuscation-named SCREAMING_SNAKE_CASE_;
        # bind its count result so the message below can report it.
        image_count = SCREAMING_SNAKE_CASE_(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
| 666 | import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer
def __init__( self : Optional[int] , A__ : List[Any]=None , A__ : Optional[int]=None , A__ : List[str]=None , A__ : Dict="replace" , A__ : List[str]="<s>" , A__ : Optional[Any]="</s>" , A__ : List[str]="</s>" , A__ : List[Any]="<s>" , A__ : int="<unk>" , A__ : int="<pad>" , A__ : List[Any]="<mask>" , A__ : Any=False , A__ : Optional[int]=True , **A__ : Union[str, Any] , ) -> int:
'''simple docstring'''
super().__init__(
A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : List[Any] = getattr(A__ , pre_tok_state.pop("type" ) )
snake_case_ : Any = add_prefix_space
snake_case_ : List[Any] = pre_tok_class(**A__ )
snake_case_ : Optional[int] = add_prefix_space
snake_case_ : List[str] = "post_processor"
snake_case_ : Tuple = getattr(self.backend_tokenizer , A__ , A__ )
if tokenizer_component_instance:
snake_case_ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ : str = tuple(state["sep"] )
if "cls" in state:
snake_case_ : Tuple = tuple(state["cls"] )
snake_case_ : Tuple = False
if state.get("add_prefix_space" , A__ ) != add_prefix_space:
snake_case_ : Optional[Any] = add_prefix_space
snake_case_ : str = True
if state.get("trim_offsets" , A__ ) != trim_offsets:
snake_case_ : Optional[int] = trim_offsets
snake_case_ : List[Any] = True
if changes_to_apply:
snake_case_ : int = getattr(A__ , state.pop("type" ) )
snake_case_ : List[Any] = component_class(**A__ )
setattr(self.backend_tokenizer , A__ , A__ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Tuple , A__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
snake_case_ : Any = value
def UpperCAmelCase__ ( self : int , *A__ : Optional[Any] , **A__ : int ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[Any] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Union[str, Any] , *A__ : Any , **A__ : List[Any] ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[int] = kwargs.get("is_split_into_words" , A__ )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def UpperCAmelCase__ ( self : int , A__ : List[str] , A__ : Union[str, Any]=None ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case_ : str = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 666 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class snake_case__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = tempfile.mkdtemp()
snake_case_ : List[str] = SamImageProcessor()
snake_case_ : Dict = SamProcessor(A__ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[int] , **A__ : Optional[Any] ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **A__ ).image_processor
def UpperCAmelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case_ : List[str] = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : int ) -> int:
'''simple docstring'''
snake_case_ : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Any = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
snake_case_ : Any = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.get_image_processor()
snake_case_ : Dict = SamProcessor(image_processor=A__ )
snake_case_ : Union[str, Any] = self.prepare_image_inputs()
snake_case_ : Tuple = image_processor(A__ , return_tensors="np" )
snake_case_ : int = processor(images=A__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_image_processor()
snake_case_ : Tuple = SamProcessor(image_processor=A__ )
snake_case_ : List[str] = [torch.ones((1, 3, 5, 5) )]
snake_case_ : Union[str, Any] = [[17_64, 26_46]]
snake_case_ : List[str] = [[6_83, 10_24]]
snake_case_ : Any = processor.post_process_masks(A__ , A__ , A__ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case_ : Union[str, Any] = processor.post_process_masks(
A__ , torch.tensor(A__ ) , torch.tensor(A__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
snake_case_ : str = [np.ones((1, 3, 5, 5) )]
snake_case_ : Tuple = processor.post_process_masks(A__ , np.array(A__ ) , np.array(A__ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case_ : str = [[1, 0], [0, 1]]
with self.assertRaises(A__ ):
snake_case_ : Optional[Any] = processor.post_process_masks(A__ , np.array(A__ ) , np.array(A__ ) )
@require_vision
@require_tf
class snake_case__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : Dict = SamImageProcessor()
snake_case_ : Union[str, Any] = SamProcessor(A__ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Dict , **A__ : Optional[Any] ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **A__ ).image_processor
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case_ : Optional[Any] = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : Any = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
snake_case_ : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.get_image_processor()
snake_case_ : Optional[Any] = SamProcessor(image_processor=A__ )
snake_case_ : Any = self.prepare_image_inputs()
snake_case_ : Any = image_processor(A__ , return_tensors="np" )
snake_case_ : List[Any] = processor(images=A__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def UpperCAmelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_image_processor()
snake_case_ : List[str] = SamProcessor(image_processor=A__ )
snake_case_ : Dict = [tf.ones((1, 3, 5, 5) )]
snake_case_ : Dict = [[17_64, 26_46]]
snake_case_ : str = [[6_83, 10_24]]
snake_case_ : Union[str, Any] = processor.post_process_masks(A__ , A__ , A__ , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case_ : List[str] = processor.post_process_masks(
A__ , tf.convert_to_tensor(A__ ) , tf.convert_to_tensor(A__ ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
snake_case_ : List[str] = [np.ones((1, 3, 5, 5) )]
snake_case_ : Tuple = processor.post_process_masks(
A__ , np.array(A__ ) , np.array(A__ ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case_ : Any = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
snake_case_ : Any = processor.post_process_masks(
A__ , np.array(A__ ) , np.array(A__ ) , return_tensors="tf" )
@require_vision
@require_torchvision
class snake_case__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = tempfile.mkdtemp()
snake_case_ : int = SamImageProcessor()
snake_case_ : int = SamProcessor(A__ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : List[Any] , **A__ : str ) -> Tuple:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **A__ ).image_processor
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case_ : Any = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = self.get_image_processor()
snake_case_ : Optional[Any] = SamProcessor(image_processor=A__ )
snake_case_ : Optional[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
snake_case_ : int = [tf.convert_to_tensor(A__ )]
snake_case_ : List[str] = [torch.tensor(A__ )]
snake_case_ : Tuple = [[17_64, 26_46]]
snake_case_ : Optional[Any] = [[6_83, 10_24]]
snake_case_ : List[str] = processor.post_process_masks(
A__ , A__ , A__ , return_tensors="tf" )
snake_case_ : Dict = processor.post_process_masks(
A__ , A__ , A__ , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.get_image_processor()
snake_case_ : Union[str, Any] = SamProcessor(image_processor=A__ )
snake_case_ : List[str] = self.prepare_image_inputs()
snake_case_ : int = image_processor(A__ , return_tensors="pt" )["pixel_values"].numpy()
snake_case_ : Union[str, Any] = processor(images=A__ , return_tensors="pt" )["pixel_values"].numpy()
snake_case_ : int = image_processor(A__ , return_tensors="tf" )["pixel_values"].numpy()
snake_case_ : Dict = processor(images=A__ , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertTrue(np.allclose(A__ , A__ ) )
self.assertTrue(np.allclose(A__ , A__ ) )
| 666 | from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
snake_case_ : Any = {
"input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
snake_case_ : List[str] = model(A__ )["last_hidden_state"]
snake_case_ : str = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , A__ )
# compare the actual values for a slice.
snake_case_ : List[str] = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 666 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = ["pixel_values"]
def __init__( self : Union[str, Any] , A__ : bool = True , A__ : Dict[str, int] = None , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = True , **A__ : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**A__ )
snake_case_ : str = size if size is not None else {"shortest_edge": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(A__ , default_to_square=A__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ , param_name="crop_size" )
snake_case_ : str = do_resize
snake_case_ : str = size
snake_case_ : Optional[Any] = resample
snake_case_ : Any = do_center_crop
snake_case_ : Any = crop_size
snake_case_ : str = do_rescale
snake_case_ : Optional[Any] = rescale_factor
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case_ : int = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : str = get_size_dict(A__ , default_to_square=A__ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
snake_case_ : str = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[Any] , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[int] = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Optional[Any] , A__ : np.ndarray , A__ : Union[int, float] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> str:
'''simple docstring'''
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Any , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : List[Any] , A__ : ImageInput , A__ : bool = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : int = None , A__ : bool = None , A__ : float = None , A__ : bool = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **A__ : Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
snake_case_ : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case_ : Union[str, Any] = size if size is not None else self.size
snake_case_ : Any = get_size_dict(A__ , param_name="size" , default_to_square=A__ )
snake_case_ : Optional[int] = resample if resample is not None else self.resample
snake_case_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : List[str] = crop_size if crop_size is not None else self.crop_size
snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" , default_to_square=A__ )
snake_case_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
snake_case_ : List[str] = image_std if image_std is not None else self.image_std
snake_case_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ : List[Any] = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ : Dict = [convert_to_rgb(A__ ) for image in images]
# All transformations expect numpy arrays.
snake_case_ : Dict = [to_numpy_array(A__ ) for image in images]
if do_resize:
snake_case_ : Dict = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_center_crop:
snake_case_ : Tuple = [self.center_crop(image=A__ , size=A__ ) for image in images]
if do_rescale:
snake_case_ : str = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
snake_case_ : int = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
snake_case_ : Tuple = {"pixel_values": images}
return BatchFeature(data=A__ , tensor_type=A__ )
| 666 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
snake_case_ : str = 1
snake_case_ : List[Any] = 3
snake_case_ : Tuple = (32, 32)
snake_case_ : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A__ )
return image
@property
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def UpperCAmelCase__ ( self : str ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : List[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(A__ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
def extract(*A__ : List[str] , **A__ : str ):
class snake_case__ :
def __init__( self : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = torch.ones([0] )
def UpperCAmelCase__ ( self : Any , A__ : Dict ) -> Any:
'''simple docstring'''
self.pixel_values.to(A__ )
return self
return Out()
return extract
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Dict = self.dummy_cond_unet
snake_case_ : int = PNDMScheduler(skip_prk_steps=A__ )
snake_case_ : Optional[Any] = self.dummy_vae
snake_case_ : List[Any] = self.dummy_text_encoder
snake_case_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
snake_case_ : Dict = 77
snake_case_ : Optional[int] = self.dummy_image.to(A__ )
snake_case_ : Any = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
snake_case_ : int = AltDiffusionImgaImgPipeline(
unet=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , safety_checker=A__ , feature_extractor=self.dummy_extractor , )
snake_case_ : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A__ )
snake_case_ : Optional[Any] = alt_pipe.to(A__ )
alt_pipe.set_progress_bar_config(disable=A__ )
snake_case_ : Optional[Any] = "A painting of a squirrel eating a burger"
snake_case_ : Optional[Any] = torch.Generator(device=A__ ).manual_seed(0 )
snake_case_ : Dict = alt_pipe(
[prompt] , generator=A__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A__ , )
snake_case_ : Optional[int] = output.images
snake_case_ : Union[str, Any] = torch.Generator(device=A__ ).manual_seed(0 )
snake_case_ : int = alt_pipe(
[prompt] , generator=A__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=A__ , return_dict=A__ , )[0]
snake_case_ : Any = image[0, -3:, -3:, -1]
snake_case_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : Union[str, Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
    '''Smoke-test the AltDiffusion img2img pipeline with all sub-models in fp16.

    Builds the pipeline from the class's dummy components, halves the
    UNet/VAE/text-encoder, runs 2 inference steps, and only checks the
    output image shape (no pixel-value comparison in the fp16 path).

    NOTE(review): as written this method raises NameError — every local is
    assigned to the throwaway name ``snake_case_`` while later lines read the
    original names (``unet``, ``vae``, ``bert``, ``alt_pipe``, ``prompt``,
    ``image``), and several arguments are the unbound placeholder ``A__``
    (e.g. ``skip_prk_steps=A__``, ``alt_pipe.to(A__)``). The intended values
    need to be restored from the upstream test file.
    '''
    snake_case_ : int = self.dummy_cond_unet
    snake_case_ : int = PNDMScheduler(skip_prk_steps=A__ )
    snake_case_ : Optional[int] = self.dummy_vae
    snake_case_ : str = self.dummy_text_encoder
    snake_case_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
    snake_case_ : List[str] = 77  # max text length fed to the tokenizer
    snake_case_ : List[str] = self.dummy_image.to(A__ )
    # put models in fp16
    snake_case_ : Any = unet.half()
    snake_case_ : str = vae.half()
    snake_case_ : Dict = bert.half()
    # make sure here that pndm scheduler skips prk
    snake_case_ : int = AltDiffusionImgaImgPipeline(
        unet=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , safety_checker=A__ , feature_extractor=self.dummy_extractor , )
    snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A__ )
    snake_case_ : Union[str, Any] = alt_pipe.to(A__ )
    alt_pipe.set_progress_bar_config(disable=A__ )
    snake_case_ : Optional[int] = "A painting of a squirrel eating a burger"
    snake_case_ : Optional[int] = torch.manual_seed(0 )
    snake_case_ : Optional[int] = alt_pipe(
        [prompt] , generator=A__ , num_inference_steps=2 , output_type="np" , image=A__ , ).images
    # Only the shape is asserted in fp16 (values are too precision-sensitive).
    assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
    '''GPU img2img test against the real "BAAI/AltDiffusion" checkpoint.

    Resizes the input sketch to 760x504 (divisible by 8 but not 16/32) to
    exercise non-standard resolutions, then checks the output shape and a
    3x3 pixel slice against hard-coded reference values.

    NOTE(review): as written this method raises NameError — locals are
    assigned to the throwaway name ``snake_case_`` while later lines read
    ``init_image``, ``pipe``, ``output``, ``image``, and several arguments
    are the unbound placeholder ``A__``. Restore from the upstream test file.
    '''
    snake_case_ : Union[str, Any] = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/img2img/sketch-mountains-input.jpg" )
    # resize to resolution that is divisible by 8 but not 16 or 32
    snake_case_ : int = init_image.resize((7_60, 5_04) )
    snake_case_ : str = "BAAI/AltDiffusion"
    snake_case_ : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
        A__ , safety_checker=A__ , )
    pipe.to(A__ )
    pipe.set_progress_bar_config(disable=A__ )
    # attention slicing keeps peak GPU memory low for the full-size model
    pipe.enable_attention_slicing()
    snake_case_ : Any = "A fantasy landscape, trending on artstation"
    snake_case_ : List[Any] = torch.manual_seed(0 )
    snake_case_ : Dict = pipe(
        prompt=A__ , image=A__ , strength=0.75 , guidance_scale=7.5 , generator=A__ , output_type="np" , )
    snake_case_ : List[str] = output.images[0]
    snake_case_ : List[Any] = image[2_55:2_58, 3_83:3_86, -1]
    assert image.shape == (5_04, 7_60, 3)
    snake_case_ : Dict = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
    """Slow, GPU-only integration tests for AltDiffusion img2img, comparing a
    768x512 run against a stored reference ``.npy`` image.

    NOTE(review): the first method calls ``super().tearDown()`` but is not
    named ``tearDown``, so unittest never invokes it between tests; the
    second method assigns locals to the throwaway name ``snake_case_`` while
    later lines read the original names (``init_image``, ``pipe``,
    ``prompt``, ``output``) and passes the unbound placeholder ``A__`` —
    restore identifiers from the upstream test file.
    """

    def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
        '''Free GPU memory after each test (intended as ``tearDown``).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        '''End-to-end img2img: sketch input + fantasy-landscape prompt vs a
        reference image, compared by max absolute error.'''
        snake_case_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        snake_case_ : str = init_image.resize((7_68, 5_12) )
        snake_case_ : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        snake_case_ : int = "BAAI/AltDiffusion"
        snake_case_ : Dict = AltDiffusionImgaImgPipeline.from_pretrained(
            A__ , safety_checker=A__ , )
        pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        pipe.enable_attention_slicing()
        snake_case_ : str = "A fantasy landscape, trending on artstation"
        snake_case_ : Optional[Any] = torch.manual_seed(0 )
        snake_case_ : Union[str, Any] = pipe(
            prompt=A__ , image=A__ , strength=0.75 , guidance_scale=7.5 , generator=A__ , output_type="np" , )
        snake_case_ : Union[str, Any] = output.images[0]
        assert image.shape == (5_12, 7_68, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 666 | from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from ``position`` that stay on an ``n x n`` board.

    Args:
        position: current ``(y, x)`` square of the knight.
        n: side length of the square board.

    Returns:
        The subset of the eight L-shaped knight moves that lands on the
        board, in a fixed order.
    """
    # The original def was renamed to a clashing placeholder and the tuple
    # unpack bound a throwaway name twice, leaving y/x undefined; callers in
    # this module use ``get_valid_pos``, so that name is restored here.
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square of ``board`` has been visited (non-zero).

    The def name is restored to ``is_complete``, which is what the tour
    helper in this module calls.
    """
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Try to extend a partial knight's tour on ``board`` starting from ``pos``.

    ``curr`` is the move number already placed at ``pos``. ``board`` is
    mutated in place: visited squares hold their 1-based move number, 0 marks
    unvisited. Returns True once a full tour is found; otherwise backtracks
    (restoring squares to 0) and returns False.
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack: this branch led nowhere
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an ``n x n`` board.

    Tries every starting square in turn; returns a board whose entries are
    the 1-based visit order of each square.

    Raises:
        ValueError: when no tour exists for the given board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1  # the tour starts at (i, j)
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0  # no tour from this start; reset and try the next
    # "Knight" fixed (original message said "Kight").
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 666 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class snake_case__ ( unittest.TestCase ):
    """Test-suite for the zero-shot-classification pipeline (PyTorch and TF).

    Covers output-schema checks on tiny models, entailment-id resolution
    from the model config's label mapping, truncation of over-long inputs,
    and slow accuracy checks against roberta-large-mnli.

    NOTE(review): several ``def`` signatures below repeat the placeholder
    parameter name ``A__`` (a SyntaxError in Python), method bodies assign to
    the throwaway name ``snake_case_`` while asserting on the original local
    names (``outputs``, ``zero_shot_classifier``, ``original_labelaid``), and
    the four class attributes rebind one mangled name. The upstream
    transformers test file has the intended identifiers — restore from there.
    """

    # Model mappings the common pipeline-test machinery parametrizes over;
    # configs named in _TO_SKIP take non-text inputs and are filtered out.
    _SCREAMING_SNAKE_CASE : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    _SCREAMING_SNAKE_CASE : Optional[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        _SCREAMING_SNAKE_CASE : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        _SCREAMING_SNAKE_CASE : Optional[Any] = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def UpperCAmelCase__ ( self : List[str] , A__ : int , A__ : Dict , A__ : str ) -> Any:
        '''Build a pipeline instance plus example inputs for the common harness.'''
        snake_case_ : Optional[int] = ZeroShotClassificationPipeline(
            model=A__ , tokenizer=A__ , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def UpperCAmelCase__ ( self : str , A__ : Optional[Any] , A__ : Tuple ) -> str:
        '''Check the output schema for every accepted candidate_labels form
        (string, list, comma-separated string, custom hypothesis template,
        batched inputs) and that invalid arguments raise.'''
        snake_case_ : Optional[Any] = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(A__ , {"sequence": ANY(A__ ), "labels": [ANY(A__ )], "scores": [ANY(A__ )]} )
        # No kwarg
        snake_case_ : Optional[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(A__ , {"sequence": ANY(A__ ), "labels": [ANY(A__ )], "scores": [ANY(A__ )]} )
        snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(A__ , {"sequence": ANY(A__ ), "labels": [ANY(A__ )], "scores": [ANY(A__ )]} )
        snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            A__ , {"sequence": ANY(A__ ), "labels": [ANY(A__ ), ANY(A__ )], "scores": [ANY(A__ ), ANY(A__ )]} )
        # single-label scores are a softmax over the candidates, so they sum to 1
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            A__ , {"sequence": ANY(A__ ), "labels": [ANY(A__ ), ANY(A__ )], "scores": [ANY(A__ ), ANY(A__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        snake_case_ : Union[str, Any] = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(A__ , {"sequence": ANY(A__ ), "labels": [ANY(A__ )], "scores": [ANY(A__ )]} )
        # https://github.com/huggingface/transformers/issues/13846
        snake_case_ : Tuple = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            A__ , [
                {"sequence": ANY(A__ ), "labels": [ANY(A__ ), ANY(A__ )], "scores": [ANY(A__ ), ANY(A__ )]}
                for i in range(1 )
            ] , )
        snake_case_ : int = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            A__ , [
                {"sequence": ANY(A__ ), "labels": [ANY(A__ ), ANY(A__ )], "scores": [ANY(A__ ), ANY(A__ )]}
                for i in range(2 )
            ] , )
        # invalid inputs must raise rather than silently produce output
        with self.assertRaises(A__ ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(A__ ):
            classifier(A__ , candidate_labels="politics" )
        with self.assertRaises(A__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(A__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels=A__ )
        with self.assertRaises(A__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(A__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=A__ , )
        self.run_entailment_id(A__ )

    def UpperCAmelCase__ ( self : Optional[int] , A__ : Pipeline ) -> Tuple:
        '''Verify entailment_id resolution from the config's label mapping:
        -1 when no entailment-like label exists, otherwise the index of the
        label starting with "entail" (case-insensitive); the original mapping
        is restored afterwards.'''
        snake_case_ : Optional[int] = zero_shot_classifier.model.config
        snake_case_ : Any = config.labelaid
        snake_case_ : Dict = zero_shot_classifier.entailment_id
        snake_case_ : List[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        snake_case_ : List[str] = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        snake_case_ : List[Any] = original_labelaid
        self.assertEqual(A__ , zero_shot_classifier.entailment_id )

    @require_torch
    def UpperCAmelCase__ ( self : Any ) -> str:
        '''Regression test: inputs much longer than the model max length must
        be truncated rather than crash.'''
        snake_case_ : Any = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 1_00 , candidate_labels=["politics", "public health", "science"] )

    @require_torch
    def UpperCAmelCase__ ( self : int ) -> Tuple:
        '''Tiny (random-weight) PT model: scores are uniform across labels.'''
        snake_case_ : Any = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        snake_case_ : Dict = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )

    @require_tf
    def UpperCAmelCase__ ( self : Any ) -> List[Any]:
        '''Tiny (random-weight) TF model: same uniform-score expectation.'''
        snake_case_ : List[str] = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        snake_case_ : Any = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )

    @slow
    @require_torch
    def UpperCAmelCase__ ( self : int ) -> Dict:
        '''roberta-large-mnli (PT): reference scores for a short prompt and a
        long multi-label run over the Transformer-paper abstract.'''
        snake_case_ : Tuple = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        snake_case_ : Union[str, Any] = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            } , )
        snake_case_ : List[str] = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            } , )

    @slow
    @require_tf
    def UpperCAmelCase__ ( self : Tuple ) -> Any:
        '''roberta-large-mnli (TF): mirror of the PT slow test above.'''
        snake_case_ : Any = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        snake_case_ : int = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            } , )
        snake_case_ : List[Any] = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            } , )
| 666 | from ...configuration_utils import PretrainedConfig
class snake_case__ ( PretrainedConfig ):
    """Configuration class for BertGeneration models.

    Fixes in this revision: the base class is the imported ``PretrainedConfig``
    (the previous base name was undefined); the ``__init__`` parameters have
    distinct names (they were all the same placeholder, a SyntaxError); and
    every hyper-parameter is stored on ``self`` so that ``PretrainedConfig``
    serialization (``to_dict`` / ``save_pretrained``) can see it — the
    previous body assigned them to a throwaway local, silently discarding
    them. Parameter names are taken from the original body, which read
    ``vocab_size``, ``hidden_size``, etc.; defaults are unchanged.
    """

    # Model-type key used by PretrainedConfig / auto-class machinery.
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=5_03_58,
        hidden_size=10_24,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=40_96,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 666 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
# Module-level logger for the training script.
logger = logging.getLogger(__name__)
# Let tf.data choose parallelism/prefetch sizes automatically.
# (Previously both assignments used one placeholder name, so the logger was
# immediately clobbered by the AUTOTUNE constant.)
AUTO = tf.data.AUTOTUNE
def parse_args():
    """Parse command-line options for TPU masked-LM training.

    Returns:
        argparse.Namespace with all training options; ``--output_dir`` is
        the only required argument.

    The def name is restored to ``parse_args`` (the name the ``__main__``
    guard calls) and the mangled ``type=``/``required=`` placeholders are
    restored from each option's default value.
    """
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """Resolve, connect to, and initialize the TPU named by ``args.tpu_name``.

    Args:
        args: parsed CLI namespace (uses tpu_name, tpu_zone, gcp_project).

    Returns:
        The TPUClusterResolver for the initialized system.

    Raises:
        RuntimeError: when the resolver cannot locate a TPU.

    The def name is restored to ``initialize_tpu`` (the name main() calls),
    and the resolver is bound to ``tpu`` so the connect/initialize calls
    below actually receive it.
    """
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """Sum the sample counts encoded in TFRecord shard filenames.

    Shard files are named ``<prefix>-<shard>-<num_samples>.tfrecord``; the
    trailing number is parsed from each basename and accumulated.

    Args:
        file_list: iterable of shard paths (local or ``gs://``).

    Returns:
        Total number of samples across all shards.
    """
    num_samples = 0
    for file in file_list:
        # Only the basename carries the "-<shard>-<count>.tfrecord" suffix.
        file_name = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", file_name).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build a tf.data pipeline over TFRecord shards.

    Args:
        records: list of .tfrecord shard paths.
        decode_fn: maps a serialized example to a feature dict.
        mask_fn: applies MLM masking to a decoded batch.
        batch_size: global batch size; drop_remainder keeps batch shapes
            static, which TPU execution requires.
        shuffle: shuffle both the shard order and the sample stream.
        shuffle_buffer_size: must be provided when ``shuffle`` is True.

    Returns:
        A batched, masked, prefetching tf.data.Dataset.

    The keyword signature is restored to match the call sites in main()
    (decode_fn= / mask_fn= / batch_size= / shuffle= / shuffle_buffer_size=).

    NOTE(review): the sample-level shuffle reads the module-global
    ``args.shuffle_buffer_size`` (as in the original body) rather than the
    ``shuffle_buffer_size`` parameter — confirm which is intended.
    """
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
def main(args):
    """Train a masked language model on TPU (or single-device fallback).

    The def name and parameter are restored to ``main(args)`` (the
    ``__main__`` guard calls it that way, and the body reads ``args``);
    mangled placeholder locals are restored from the names the body itself
    reads (``tpu``, ``strategy``, ``tokenizer``, ``config``, ...).
    """
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # Only the config is copied from the pretrained model; the tokenizer
    # decides the vocab size of the freshly initialized model.
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        # Deserialize one TFRecord example into fixed-length id/mask tensors.
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    # Bind the parsed namespace to the name main() is called with; the
    # original assigned it to an unrelated placeholder, so ``main(args)``
    # raised NameError.
    args = parse_args()
    main(args)
| 666 | import math
def sieve(n: int) -> list[int]:
    """Return all primes <= ``n`` using a segmented sieve of Eratosthenes.

    First sieves the "base" primes up to sqrt(n), then marks composites in
    windows of width ~sqrt(n), keeping memory at O(sqrt(n)).

    The def name is restored to ``sieve`` (the driver line below calls it),
    and the subscript assignments lost in the mangled version
    (``temp[i] = False`` / ``temp[j - low] = False``) are restored so the
    sieve actually marks composites.
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Classic sieve over [2, sqrt(n)] to collect the base primes.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # Sieve the rest of [sqrt(n)+1, n] one window at a time.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` at or above `low`.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
# Demo driver: print every prime below one million (large output).
print(sieve(1_0**6))
| 666 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """Tokenizer test-suite for RoCBert (class continues below this chunk).

    NOTE(review): the mixin base is the unresolved name ``_UpperCamelCase``;
    given the import of ``TokenizerTesterMixin`` above, that is presumably
    the intended base — confirm against the upstream test file.
    """

    # Harness knobs consumed by the common tokenizer-test mixin. All five
    # attribute names were mangled to the same placeholder, so only the last
    # binding survives; the upstream file gives each its own name
    # (tokenizer_class, rust_tokenizer_class, test_rust_tokenizer, ...) —
    # TODO confirm and restore.
    _SCREAMING_SNAKE_CASE : str = RoCBertTokenizer
    _SCREAMING_SNAKE_CASE : Dict = None
    _SCREAMING_SNAKE_CASE : str = False
    _SCREAMING_SNAKE_CASE : str = True
    _SCREAMING_SNAKE_CASE : List[str] = filter_non_english
def setUp(self):
    """Write the vocab / word-shape / word-pronunciation fixture files that
    the tokenizer tests read back via ``self.vocab_file`` etc.

    Restored: the method name (it calls ``super().setUp()`` and must be
    unittest's ``setUp`` to run before each test), the ``vocab_tokens``
    local the loop and file-writes read, and the two mapping locals dumped
    to JSON.
    """
    super().setUp()
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
    word_shape = {}
    word_pronunciation = {}
    # Trivial fixture: each token maps to its own index for both tables.
    for i, value in enumerate(vocab_tokens):
        word_shape[value] = i
        word_pronunciation[value] = i
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
    self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
        json.dump(word_shape, word_shape_writer, ensure_ascii=False)
    with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
        json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
    """Tokenize a mixed CJK/special-token string and check token, shape-id,
    and pronunciation-id conversion against the setUp fixture vocab.

    Restored: the ``tokenizer``/``tokens`` locals that the assertions below
    read (the original bound both to a throwaway placeholder and passed the
    unbound name ``A__``).
    """
    tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
    tokens = tokenizer.tokenize("你好[SEP]你是谁")
    self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
    self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
    self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
    self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def UpperCAmelCase__ ( self : int ) -> str:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = RoCBertBasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def UpperCAmelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = RoCBertBasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = RoCBertBasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
snake_case_ : Any = RoCBertBasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = RoCBertBasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
snake_case_ : str = RoCBertBasicTokenizer(do_lower_case=A__ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
snake_case_ : Tuple = {}
for i, token in enumerate(A__ ):
snake_case_ : Dict = i
snake_case_ : str = RoCBertWordpieceTokenizer(vocab=A__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def UpperCAmelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def UpperCAmelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def UpperCAmelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
snake_case_ : List[str] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
snake_case_ : List[str] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
snake_case_ : List[Any] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
snake_case_ : List[str] = tokenizer_r.encode_plus(
A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
snake_case_ : List[Any] = tokenizer_r.do_lower_case if hasattr(A__ , "do_lower_case" ) else False
snake_case_ : str = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = ["的", "人", "有"]
snake_case_ : Optional[Any] = "".join(A__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case_ : Tuple = True
snake_case_ : str = self.tokenizer_class.from_pretrained(A__ , **A__ )
snake_case_ : int = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
snake_case_ : List[str] = tokenizer_p.encode(A__ , add_special_tokens=A__ )
snake_case_ : int = tokenizer_r.encode(A__ , add_special_tokens=A__ )
snake_case_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(A__ )
snake_case_ : int = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
snake_case_ : str = False
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
snake_case_ : Tuple = self.tokenizer_class.from_pretrained(A__ , **A__ )
snake_case_ : Optional[Any] = tokenizer_r.encode(A__ , add_special_tokens=A__ )
snake_case_ : List[str] = tokenizer_p.encode(A__ , add_special_tokens=A__ )
snake_case_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(A__ )
snake_case_ : Dict = tokenizer_p.convert_ids_to_tokens(A__ )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case_ : Optional[Any] = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(A__ )
]
self.assertListEqual(A__ , A__ )
self.assertListEqual(A__ , A__ )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case_ : Tuple = tokenizer.encode("你好" , add_special_tokens=A__ )
snake_case_ : Union[str, Any] = tokenizer.encode("你是谁" , add_special_tokens=A__ )
snake_case_ : Tuple = tokenizer.build_inputs_with_special_tokens(A__ )
snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
snake_case_ : str = self.get_tokenizers(do_lower_case=A__ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
snake_case_ : Union[str, Any] = "你好,你是谁"
snake_case_ : Any = tokenizer.tokenize(A__ )
snake_case_ : Optional[int] = tokenizer.convert_tokens_to_ids(A__ )
snake_case_ : Dict = tokenizer.convert_tokens_to_shape_ids(A__ )
snake_case_ : int = tokenizer.convert_tokens_to_pronunciation_ids(A__ )
snake_case_ : int = tokenizer.prepare_for_model(
A__ , A__ , A__ , add_special_tokens=A__ )
snake_case_ : Tuple = tokenizer.encode_plus(A__ , add_special_tokens=A__ )
self.assertEqual(A__ , A__ )
| 666 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class snake_case__ ( unittest.TestCase ):
    # Helper/tester that holds the image-processor hyper-parameters and computes
    # the (height, width) a shortest-edge resize should produce.
    def __init__( self : List[str] , A__ : List[Any] , A__ : int=7 , A__ : Union[str, Any]=3 , A__ : List[str]=30 , A__ : Optional[int]=4_00 , A__ : Optional[Any]=True , A__ : Optional[int]=None , A__ : Optional[Any]=True , A__ : Any=[0.5, 0.5, 0.5] , A__ : int=[0.5, 0.5, 0.5] , A__ : Any=True , A__ : int=1 / 2_55 , A__ : List[str]=True , ) -> Dict:
        '''Store batch/resolution/normalisation settings used by the tests.'''
        # size defaults to DETR-style shortest-edge / longest-edge resizing.
        snake_case_ : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        snake_case_ : Any = parent
        snake_case_ : Optional[int] = batch_size
        snake_case_ : List[Any] = num_channels
        snake_case_ : Union[str, Any] = min_resolution
        snake_case_ : List[Any] = max_resolution
        snake_case_ : Tuple = do_resize
        snake_case_ : Dict = size
        snake_case_ : Optional[Any] = do_normalize
        snake_case_ : int = image_mean
        snake_case_ : List[Any] = image_std
        snake_case_ : Tuple = do_rescale
        snake_case_ : Any = rescale_factor
        snake_case_ : Optional[int] = do_pad
    def UpperCAmelCase__ ( self : int ) -> List[str]:
        '''Kwargs dict used to instantiate the image processor under test.'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def UpperCAmelCase__ ( self : Optional[int] , A__ : Optional[int] , A__ : Any=False ) -> Optional[Any]:
        '''Return the post-resize (height, width); for a batch, the per-image maxima (padding target).'''
        if not batched:
            snake_case_ : Any = image_inputs[0]
            if isinstance(A__ , Image.Image ):
                snake_case_ ,snake_case_ : Dict = image.size
            else:
                snake_case_ ,snake_case_ : int = image.shape[1], image.shape[2]
            # Scale so the shorter side hits "shortest_edge", preserving aspect ratio.
            if w < h:
                snake_case_ : Dict = int(self.size["shortest_edge"] * h / w )
                snake_case_ : Optional[int] = self.size["shortest_edge"]
            elif w > h:
                snake_case_ : Optional[int] = self.size["shortest_edge"]
                snake_case_ : str = int(self.size["shortest_edge"] * w / h )
            else:
                snake_case_ : Optional[int] = self.size["shortest_edge"]
                snake_case_ : List[Any] = self.size["shortest_edge"]
        else:
            snake_case_ : str = []
            for image in image_inputs:
                snake_case_ ,snake_case_ : Tuple = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            snake_case_ : List[Any] = max(A__ , key=lambda A__ : item[0] )[0]
            snake_case_ : int = max(A__ , key=lambda A__ : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    # Integration tests for ConditionalDetrImageProcessor: config properties,
    # PIL/numpy/torch input handling, and slow COCO detection/panoptic checks.
    _SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
    def UpperCAmelCase__ ( self : Tuple ) -> Dict:
        '''Build the shared tester supplying image-processor kwargs.'''
        # NOTE(review): the tester class is named ``snake_case__`` in this file,
        # not ``ConditionalDetrImageProcessingTester`` — obfuscation collapsed it.
        snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )
    @property
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        '''Kwargs dict used to instantiate the processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCAmelCase__ ( self : Any ) -> Tuple:
        '''Processor must expose the standard configuration attributes.'''
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , "image_mean" ) )
        self.assertTrue(hasattr(A__ , "image_std" ) )
        self.assertTrue(hasattr(A__ , "do_normalize" ) )
        self.assertTrue(hasattr(A__ , "do_resize" ) )
        self.assertTrue(hasattr(A__ , "size" ) )
    def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
        '''from_dict honours defaults and the size/max_size/pad overrides.'''
        snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , A__ )
        snake_case_ : Optional[int] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , A__ )
    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        '''Intentionally empty: no extra batched-feature test for this processor.'''
        pass
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        '''PIL input: single and batched images resize to the expected shapes.'''
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCAmelCase__ ( self : int ) -> Any:
        '''numpy input: single and batched images resize to the expected shapes.'''
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def UpperCAmelCase__ ( self : Tuple ) -> str:
        '''torch input: single and batched images resize to the expected shapes.'''
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        '''COCO detection annotations encode into pixel values plus target dicts with known reference values.'''
        snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case_ : Optional[Any] = json.loads(f.read() )
        snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
        # encode them
        snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : Any = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify orig_size
        snake_case_ : Any = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
    @slow
    def UpperCAmelCase__ ( self : int ) -> str:
        '''COCO panoptic annotations (including segmentation masks) encode with known reference values.'''
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Any = json.loads(f.read() )
        snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify masks
        snake_case_ : Union[str, Any] = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
        # verify orig_size
        snake_case_ : Dict = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 1 |
# flake8: noqa
# Lint as: python3
# Public names re-exported by this utils package.
# NOTE(review): this list was presumably ``__all__`` in the original module;
# bound to the obfuscated name ``UpperCAmelCase`` it has no effect on
# star-imports — confirm against upstream.
UpperCAmelCase = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 666 | import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module-level logger.
UpperCAmelCase = logging.get_logger(__name__)
# NOTE(review): the next two assignments rebind the same obfuscated name, and
# ``MODEL_CONFIG_CLASSES`` is never defined in this file, so the last line
# raises NameError at import time — the original distinct constant names
# (config-class list / model-type tuple) were lost; confirm against upstream.
UpperCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
    """Command-line arguments controlling SQuAD data loading and feature
    conversion (sequence/query/answer lengths, caching, v2 handling)."""
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "Model type selected in the list: " + ", ".join(_UpperCamelCase )} )
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    _SCREAMING_SNAKE_CASE : int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    _SCREAMING_SNAKE_CASE : int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    _SCREAMING_SNAKE_CASE : float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    _SCREAMING_SNAKE_CASE : int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    _SCREAMING_SNAKE_CASE : int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    _SCREAMING_SNAKE_CASE : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class snake_case__ ( _UpperCamelCase ):
    # Dataset split selector used by the SQuAD dataset below; presumably an
    # Enum — the base class is defined elsewhere (TODO confirm).
    _SCREAMING_SNAKE_CASE : Tuple = "train"
    _SCREAMING_SNAKE_CASE : Any = "dev"
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : SquadDataTrainingArguments
_SCREAMING_SNAKE_CASE : List[SquadFeatures]
_SCREAMING_SNAKE_CASE : Split
_SCREAMING_SNAKE_CASE : bool
def __init__( self : str , A__ : SquadDataTrainingArguments , A__ : PreTrainedTokenizer , A__ : Optional[int] = None , A__ : Union[str, Split] = Split.train , A__ : Optional[bool] = False , A__ : Optional[str] = None , A__ : Optional[str] = "pt" , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = args
snake_case_ : int = is_language_sensitive
snake_case_ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(A__ , A__ ):
try:
snake_case_ : List[str] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
snake_case_ : Tuple = mode
# Load data features from cache or dataset file
snake_case_ : Dict = "v2" if args.version_2_with_negative else "v1"
snake_case_ : List[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case_ : List[Any] = cached_features_file + ".lock"
with FileLock(A__ ):
if os.path.exists(A__ ) and not args.overwrite_cache:
snake_case_ : int = time.time()
snake_case_ : List[Any] = torch.load(A__ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case_ : Tuple = self.old_features["features"]
snake_case_ : List[str] = self.old_features.get("dataset" , A__ )
snake_case_ : Tuple = self.old_features.get("examples" , A__ )
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
if mode == Split.dev:
snake_case_ : Tuple = self.processor.get_dev_examples(args.data_dir )
else:
snake_case_ : Tuple = self.processor.get_train_examples(args.data_dir )
snake_case_ ,snake_case_ : Optional[Any] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=A__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A__ , )
snake_case_ : Any = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , A__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : str ) -> Dict:
'''simple docstring'''
return len(self.features )
def __getitem__( self : Optional[int] , A__ : Optional[int] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
snake_case_ : Any = self.features[i]
snake_case_ : Optional[int] = torch.tensor(feature.input_ids , dtype=torch.long )
snake_case_ : Union[str, Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
snake_case_ : List[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long )
snake_case_ : List[Any] = torch.tensor(feature.cls_index , dtype=torch.long )
snake_case_ : str = torch.tensor(feature.p_mask , dtype=torch.float )
snake_case_ : str = torch.tensor(feature.is_impossible , dtype=torch.float )
snake_case_ : Optional[int] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case_ : Any = torch.tensor(feature.start_position , dtype=torch.long )
snake_case_ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 666 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# NOTE(review): the four module "constants" below all rebind the same
# obfuscated name, yet main() reads OUTPUT_DIR / FLIP_TYPE (and presumably
# label/image dirs) — the original distinct names were lost; confirm against
# the source script.
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = 1  # (0 is vertical, 1 is horizontal)
def SCREAMING_SNAKE_CASE_ ( ):
    """Entry point: flip every dataset image, then write the flipped images and
    their updated YOLO annotations with a randomized filename suffix.

    NOTE(review): ``get_dataset`` / ``update_image_and_anno`` / ``random_chars``
    and ``OUTPUT_DIR`` are not defined under these names in this file (the
    obfuscation collapsed them), and the ``lowerCAmelCase_`` arguments are
    undefined placeholders — confirm against the original script.
    """
    snake_case_ ,snake_case_ : List[str] = get_dataset(lowerCAmelCase_ , lowerCAmelCase_ )
    print("Processing..." )
    snake_case_ ,snake_case_ ,snake_case_ : Tuple = update_image_and_anno(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    for index, image in enumerate(lowerCAmelCase_ ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        snake_case_ : List[Any] = random_chars(3_2 )
        snake_case_ : Dict = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
        snake_case_ : Dict = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # NOTE(review): the leading "/" targets the filesystem root instead of
        # ``file_root`` (which already contains OUTPUT_DIR) — likely a bug;
        # should presumably be f"{file_root}.jpg" (same for the .txt below).
        cva.imwrite(f"/{file_root}.jpg" , lowerCAmelCase_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
        print(f"Success {index+1}/{len(lowerCAmelCase_ )} with {file_name}" )
        snake_case_ : Optional[Any] = []
        for anno in new_annos[index]:
            snake_case_ : int = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(lowerCAmelCase_ )
        with open(f"/{file_root}.txt" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def SCREAMING_SNAKE_CASE_ ( label_dir: str , img_dir: str ):
    """Collect image paths and YOLO-format annotations.

    Fixes: the original signature declared two parameters with the same
    obfuscated name (a SyntaxError) and consequently opened an ambiguous
    name instead of the label file.

    Args:
        label_dir: directory containing ``*.txt`` label files; each line is
            ``class x_center y_center width height``.
        img_dir: directory containing the matching ``<name>.jpg`` images.

    Returns:
        ``(img_paths, labels)`` — parallel lists; label files with no boxes
        are skipped entirely.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        # Label file stem doubles as the image file stem.
        label_name = os.path.basename(label_file ).rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            # Skip images with empty annotation files.
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def SCREAMING_SNAKE_CASE_ ( img_list: list , anno_list: list , flip_type: int = 1 ):
    """Flip each image and mirror its YOLO boxes accordingly.

    Fixes: the original signature declared three parameters with the same
    obfuscated name — a SyntaxError.

    Args:
        img_list: image file paths.
        anno_list: per-image lists of ``[class, x_c, y_c, w, h]`` boxes.
        flip_type: cv2 flip code — 1 flips horizontally (mirrors x_c),
            0 flips vertically (mirrors y_c).  Any other value leaves
            ``new_img`` unbound and raises NameError, matching the
            original behaviour.

    Returns:
        ``(new_imgs_list, new_annos_lists, path_list)``.
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cva.imread(path )
        if flip_type == 1:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Horizontal flip mirrors the x-center around 0.5.
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cva.flip(img , flip_type )
            for bbox in img_annos:
                # Vertical flip mirrors the y-center around 0.5.
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def SCREAMING_SNAKE_CASE_ ( number_char: int = 3_2 ) -> str:
    """Return a random code of ``number_char`` lowercase letters and digits.

    Fixes: the original parameter was named ``lowerCAmelCase_`` while the
    body validated an undefined ``number_char``, and ``random.choice`` was
    called on the integer parameter instead of the character pool — a
    TypeError on every call.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this file — the
    # entry point was renamed to SCREAMING_SNAKE_CASE_ by the obfuscation, so
    # this call raises NameError; confirm against the original script.
    main()
    print("DONE ✅")
| 666 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the GIT configuration module.
# NOTE(review): both bindings below share the mangled name ``UpperCAmelCase``,
# so the logger is immediately shadowed by the URL map — the original names
# were presumably ``logger`` and a pretrained-config archive map; confirm.
UpperCAmelCase = logging.get_logger(__name__)
# Map from model identifier to its hosted config.json.
UpperCAmelCase = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration for the vision encoder of a GIT model.

    Stores the hyper-parameters of the vision transformer and supports
    loading the nested ``vision_config`` out of a composite ``git`` config.
    NOTE(review): the base class is presumably ``PretrainedConfig`` (names in
    this file are machine-mangled); confirm against upstream transformers.

    Fixes the mangled original, in which ``__init__`` bound every value to a
    throwaway local instead of setting attributes on ``self`` and the
    classmethod never bound ``config_dict``.
    """

    # Keep the mangled attribute for compatibility and also expose the
    # conventional ``model_type`` the config machinery checks via hasattr().
    _SCREAMING_SNAKE_CASE = "git_vision_model"
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """Create the vision config; unrecognized kwargs go to the base class."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def UpperCAmelCase__ (cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a composite ``git`` config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # A full GIT config nests this config under "vision_config".
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            # NOTE(review): ``logger`` is expected at module level (mangled above).
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class snake_case__ ( _UpperCamelCase ):
    """Composite configuration for a GIT model (text decoder + vision encoder).

    Fixes the mangled original, in which ``__init__`` bound every value to a
    throwaway local instead of setting attributes on ``self``.
    NOTE(review): ``GitVisionConfig`` is not defined under that name in this
    chunk (the vision config class above is itself mangled) — confirm the
    intended reference before running.
    """

    # Keep the mangled attribute for compatibility and expose the
    # conventional ``model_type``.
    _SCREAMING_SNAKE_CASE = "git"
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        """Create the composite config; token ids are forwarded to the base class."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def UpperCAmelCase__ (self) -> dict:
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 666 | 1 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Well-known filenames and cache locations used by the diffusers library.
# NOTE(review): every constant below is bound to the same mangled name
# ``UpperCAmelCase``, so each assignment shadows the previous one; the
# original names (CONFIG_NAME, WEIGHTS_NAME, ONNX_WEIGHTS_NAME, ...) were
# presumably distinct. ``default_cache_path`` is also undefined in this
# chunk — confirm against upstream diffusers.
UpperCAmelCase = HUGGINGFACE_HUB_CACHE
UpperCAmelCase = "config.json"
UpperCAmelCase = "diffusion_pytorch_model.bin"
UpperCAmelCase = "diffusion_flax_model.msgpack"
UpperCAmelCase = "model.onnx"
UpperCAmelCase = "diffusion_pytorch_model.safetensors"
UpperCAmelCase = "weights.pb"
UpperCAmelCase = "https://huggingface.co"
UpperCAmelCase = default_cache_path
UpperCAmelCase = "diffusers_modules"
UpperCAmelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
UpperCAmelCase = ["fp16", "non-ema"]
UpperCAmelCase = ".self_attn"
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro–Winkler similarity of two strings (0.0 … 1.0).

    Jaro similarity combines the fraction of matched characters in both
    strings with a transposition penalty; Winkler's refinement adds a bonus
    for a common prefix of up to four characters.

    Fixes the mangled original, in which both parameters shared one (invalid)
    name and the intermediate results were bound to a single throwaway local.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that also appear in _str2 within the Jaro
        # matching window, each target character consumed at most once.
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition: half the number of positions where the two matched
    # sequences disagree
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


# Backward-compatible alias for the mangled name this file originally used.
SCREAMING_SNAKE_CASE_ = jaro_winkler
if __name__ == "__main__":
    # Run the doctests, then print one sample similarity.
    # NOTE(review): ``jaro_winkler`` is not defined under that name in this
    # chunk — the definition above was mangled to a different identifier;
    # confirm before running as a script.
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 1 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a ``VideoMAEConfig`` for the given checkpoint name.

    Architecture sizes are filled in from the size tag in ``model_name``;
    fine-tuned checkpoints additionally get the Kinetics-400 or
    Something-Something-v2 label maps downloaded from the Hub.

    Fixes the mangled original, whose locals (config, filename, id2label)
    were all bound to one throwaway name, leaving the returned config empty.
    NOTE(review): ``set_architecture_configs`` is expected to be the helper
    defined just below (its def name was mangled) — confirm.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # Pre-training checkpoints do not mean-pool the final hidden states.
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config


# Backward-compatible alias for the mangled name this file originally used.
SCREAMING_SNAKE_CASE_ = get_videomae_config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on ``config`` from the size tag in ``model_name``.

    Recognized tags: "small", "large", "huge"; "base" keeps the config's
    defaults. Any other name raises ``ValueError``.

    Fixes the mangled original, in which every assignment was bound to a
    single throwaway local instead of an attribute on ``config``.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"")


# Backward-compatible alias for the mangled name this file originally used.
SCREAMING_SNAKE_CASE_ = set_architecture_configs
def rename_key(name):
    """Map one key of the original VideoMAE checkpoint to the HF layout.

    Applies a sequence of substring replacements; each condition re-tests the
    (possibly already rewritten) name, so order matters.

    Fixes the mangled original, in which every replacement result was bound
    to a throwaway local so the function always returned its input unchanged.
    """
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        # only bias keys still contain "attn" at this point
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name


# Backward-compatible alias for the mangled name this file originally used.
SCREAMING_SNAKE_CASE_ = rename_key
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Union[str, Any] , lowerCAmelCase_: int ):
    # Rewrites a raw VideoMAE checkpoint state dict into the HF Transformers
    # key layout, splitting fused qkv projections by hidden size.
    # NOTE(review): identifiers here look machine-mangled — the loop body
    # references ``orig_state_dict``, ``val``, ``config``, ``key_split`` and
    # ``dim`` that are never bound (every assignment targets one throwaway
    # local), and the rewritten keys are never stored back. Restore from the
    # upstream VideoMAE conversion script before running.
    for key in orig_state_dict.copy().keys():
        snake_case_ : str = orig_state_dict.pop(lowerCAmelCase_ )
        if key.startswith("encoder." ):
            snake_case_ : Any = key.replace("encoder." , "" )
        if "qkv" in key:
            snake_case_ : List[Any] = key.split("." )
            if key.startswith("decoder.blocks" ):
                # decoder layers use the (smaller) decoder hidden size
                snake_case_ : Any = config.decoder_hidden_size
                snake_case_ : List[Any] = int(key_split[2] )
                snake_case_ : Optional[Any] = "decoder.decoder_layers."
                if "weight" in key:
                    # fused qkv weight is split into query/key/value thirds
                    snake_case_ : Optional[Any] = val[:dim, :]
                    snake_case_ : Optional[int] = val[dim : dim * 2, :]
                    snake_case_ : Union[str, Any] = val[-dim:, :]
            else:
                snake_case_ : List[Any] = config.hidden_size
                snake_case_ : Union[str, Any] = int(key_split[1] )
                snake_case_ : Optional[Any] = "videomae.encoder.layer."
                if "weight" in key:
                    snake_case_ : Any = val[:dim, :]
                    snake_case_ : int = val[dim : dim * 2, :]
                    snake_case_ : Tuple = val[-dim:, :]
        else:
            # non-qkv keys are renamed (presumably via the rename helper) as-is
            snake_case_ : Tuple = val
    return orig_state_dict
def prepare_video():
    """Download the 'eating spaghetti' sample clip and return its frames as a list.

    The clip is stored as a NumPy array on the Hub; returning ``list(video)``
    yields one array per frame, which is what the image processor expects.
    """
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


# Backward-compatible alias for the mangled name this file originally used.
SCREAMING_SNAKE_CASE_ = prepare_video
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Union[str, Any] , lowerCAmelCase_: Optional[int] , lowerCAmelCase_: str , lowerCAmelCase_: Union[str, Any] ):
    """Convert an original VideoMAE checkpoint to HF format and verify its outputs.

    NOTE(review): identifiers in this function look machine-mangled — the
    body references ``model_name``, ``model``, ``files``, ``outputs``,
    ``logits``, ``expected_shape``/``expected_slice`` and others that are
    never bound (every assignment targets one throwaway local). Restore from
    the upstream VideoMAE conversion script before running.
    """
    snake_case_ : Optional[Any] = get_videomae_config(lowerCAmelCase_ )
    if "finetuned" in model_name:
        snake_case_ : Dict = VideoMAEForVideoClassification(lowerCAmelCase_ )
    else:
        snake_case_ : str = VideoMAEForPreTraining(lowerCAmelCase_ )
    # download original checkpoint, hosted on Google Drive
    snake_case_ : int = "pytorch_model.bin"
    gdown.cached_download(lowerCAmelCase_ , lowerCAmelCase_ , quiet=lowerCAmelCase_ )
    snake_case_ : str = torch.load(lowerCAmelCase_ , map_location="cpu" )
    if "model" in files:
        snake_case_ : str = files["model"]
    else:
        snake_case_ : Any = files["module"]
    snake_case_ : int = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
    model.load_state_dict(lowerCAmelCase_ )
    model.eval()
    # verify model on basic input
    snake_case_ : Any = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    snake_case_ : Optional[Any] = prepare_video()
    snake_case_ : Union[str, Any] = image_processor(lowerCAmelCase_ , return_tensors="pt" )
    if "finetuned" not in model_name:
        # pre-training checkpoints also need a fixed boolean mask
        snake_case_ : Optional[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
        snake_case_ : Optional[int] = torch.load(lowerCAmelCase_ )
    snake_case_ : int = model(**lowerCAmelCase_ )
    snake_case_ : Tuple = outputs.logits
    snake_case_ : str = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        snake_case_ : Optional[Any] = torch.Size([1, 4_0_0] )
        snake_case_ : List[str] = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
    elif model_name == "videomae-small-finetuned-ssv2":
        snake_case_ : Tuple = torch.Size([1, 1_7_4] )
        snake_case_ : Optional[Any] = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
    elif model_name == "videomae-base":
        snake_case_ : List[Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        snake_case_ : Optional[Any] = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
    elif model_name == "videomae-base-short":
        snake_case_ : int = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        snake_case_ : List[Any] = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        snake_case_ : int = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
    elif model_name == "videomae-large":
        snake_case_ : str = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        snake_case_ : Tuple = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        snake_case_ : str = torch.Size([1, 4_0_0] )
        snake_case_ : int = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        snake_case_ : int = torch.Size([1, 4_0_0] )
        snake_case_ : Dict = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        snake_case_ : Optional[int] = torch.Size([1, 4_0_0] )
        snake_case_ : Tuple = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
    elif model_name == "videomae-base-finetuned-kinetics":
        snake_case_ : str = torch.Size([1, 4_0_0] )
        snake_case_ : Any = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
    elif model_name == "videomae-base-short-ssv2":
        snake_case_ : int = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        snake_case_ : str = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        snake_case_ : List[str] = torch.Size([1, 1_7_4] )
        snake_case_ : List[str] = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
    elif model_name == "videomae-base-ssv2":
        snake_case_ : int = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        snake_case_ : List[str] = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        snake_case_ : Optional[Any] = torch.Size([1, 1_7_4] )
        snake_case_ : Any = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}" )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
    else:
        print("Logits:" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
    print("Logits ok!" )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        snake_case_ : Tuple = outputs.loss
        assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 )
        print("Loss ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(lowerCAmelCase_ )
        model.save_pretrained(lowerCAmelCase_ )
    if push_to_hub:
        print("Pushing to the hub..." )
        model.push_to_hub(lowerCAmelCase_ , organization="nielsr" )
if __name__ == "__main__":
    # CLI entry point for the VideoMAE conversion script.
    # NOTE(review): the parser and parsed args are bound to the mangled name
    # ``UpperCAmelCase`` (shadowing each other), while the calls below use
    # ``parser``/``args``, and ``convert_videomae_checkpoint`` is not defined
    # under that name in this chunk — restore from the upstream script.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    UpperCAmelCase = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 666 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger.
# NOTE(review): every module constant below is bound to the same mangled
# name ``UpperCAmelCase``, so each assignment shadows the previous one; the
# original names (logger, new_layer_name_dict, REMOTE_MODEL_PATHS, CUR_PATH,
# default_cache_dir, CACHE_DIR) were presumably distinct — confirm upstream.
UpperCAmelCase = logging.get_logger(__name__)
# Fixed seed so the random sanity-check inputs are reproducible.
set_seed(7_7_0)
# Mapping from original Bark layer-name fragments to the HF Bark layout.
UpperCAmelCase = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}
# Hub locations of the original suno/bark checkpoints (small and large variants).
UpperCAmelCase = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}
UpperCAmelCase = os.path.dirname(os.path.abspath(__file__))
UpperCAmelCase = os.path.join(os.path.expanduser("~"), ".cache")
# NOTE(review): ``default_cache_dir`` is not defined in this chunk — the
# previous line presumably bound it before identifiers were mangled.
UpperCAmelCase = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path for the given Bark sub-model checkpoint.

    Args:
        model_type: "text", "coarse" or "fine".
        use_small: select the small checkpoint variant when True.

    Fixes the mangled original, which joined the path against the wrong
    argument. NOTE(review): ``CACHE_DIR`` and ``REMOTE_MODEL_PATHS`` are
    expected at module level (both mangled to ``UpperCAmelCase`` above) —
    confirm before running.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


# Backward-compatible alias for the mangled name this file originally used.
SCREAMING_SNAKE_CASE_ = _get_ckpt_path
def _download(from_hf_path, file_name):
    """Fetch ``file_name`` from the Hub repo ``from_hf_path`` into the local cache dir.

    Fixes the mangled original, which created a directory named after the repo
    path and passed a string where ``exist_ok`` expects a bool.
    NOTE(review): ``CACHE_DIR`` is expected at module level (mangled to
    ``UpperCAmelCase`` above) — confirm before running.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


# Backward-compatible alias for the mangled name this file originally used.
SCREAMING_SNAKE_CASE_ = _download
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Any , lowerCAmelCase_: Dict , lowerCAmelCase_: List[str]=False , lowerCAmelCase_: Dict="text" ):
    """Load one original Bark sub-model checkpoint into the matching HF model class.

    NOTE(review): identifiers here look machine-mangled — the body references
    ``model_type``, ``use_small``, ``model_key``, ``model_info``, ``checkpoint``,
    ``model_args``, ``state_dict``, ``model``, ``new_k`` and others that are
    never bound (every assignment targets one throwaway local). Restore from
    the upstream Bark conversion script before running.
    """
    if model_type == "text":
        snake_case_ : int = BarkSemanticModel
        snake_case_ : str = BarkSemanticConfig
        snake_case_ : Optional[Any] = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        snake_case_ : str = BarkCoarseModel
        snake_case_ : Optional[int] = BarkCoarseConfig
        snake_case_ : Any = BarkCoarseGenerationConfig
    elif model_type == "fine":
        snake_case_ : Optional[int] = BarkFineModel
        snake_case_ : Tuple = BarkFineConfig
        snake_case_ : List[str] = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    snake_case_ : Optional[Any] = f"{model_type}_small" if use_small else model_type
    snake_case_ : Any = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(lowerCAmelCase_ ):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`." )
        _download(model_info["repo_id"] , model_info["file_name"] )
    snake_case_ : Any = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
    # this is a hack
    snake_case_ : Union[str, Any] = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        # older checkpoints used a single "vocab_size" for input and output
        snake_case_ : str = model_args["vocab_size"]
        snake_case_ : Union[str, Any] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    snake_case_ : Union[str, Any] = model_args.pop("n_head" )
    snake_case_ : int = model_args.pop("n_embd" )
    snake_case_ : Any = model_args.pop("n_layer" )
    snake_case_ : List[str] = ConfigClass(**checkpoint["model_args"] )
    snake_case_ : Optional[Any] = ModelClass(config=lowerCAmelCase_ )
    snake_case_ : Tuple = GenerationConfigClass()
    snake_case_ : List[str] = model_generation_config
    snake_case_ : Optional[int] = checkpoint["model"]
    # fixup checkpoint
    snake_case_ : Optional[int] = "_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(lowerCAmelCase_ ):
            # replace part of the key with corresponding layer name in HF implementation
            snake_case_ : Tuple = k[len(lowerCAmelCase_ ) :]
            for old_layer_name in new_layer_name_dict:
                snake_case_ : int = new_k.replace(lowerCAmelCase_ , new_layer_name_dict[old_layer_name] )
            snake_case_ : int = state_dict.pop(lowerCAmelCase_ )
    # sanity-check key sets, ignoring the (non-persistent) attention bias buffers
    snake_case_ : Optional[int] = set(state_dict.keys() ) - set(model.state_dict().keys() )
    snake_case_ : str = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    snake_case_ : Any = set(model.state_dict().keys() ) - set(state_dict.keys() )
    snake_case_ : List[Any] = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(lowerCAmelCase_ ) != 0:
        raise ValueError(f"extra keys found: {extra_keys}" )
    if len(lowerCAmelCase_ ) != 0:
        raise ValueError(f"missing keys: {missing_keys}" )
    model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
    snake_case_ : str = model.num_parameters(exclude_embeddings=lowerCAmelCase_ )
    snake_case_ : Union[str, Any] = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowerCAmelCase_ , 3 )} loss" )
    model.eval()
    model.to(lowerCAmelCase_ )
    del checkpoint, state_dict
    return model
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: List[Any] , lowerCAmelCase_: str=False , lowerCAmelCase_: int="text" ):
    """Convert one Bark sub-model, compare it against the original implementation, and save it.

    NOTE(review): identifiers here look machine-mangled — the body references
    ``model_type``, ``model``, ``bark_model``, ``batch_size``,
    ``sequence_length``, ``n_codes_total``, ``output_new_model``/
    ``output_old_model`` and others that are never bound. Restore from the
    upstream Bark conversion script before running.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    snake_case_ : int = "cpu"  # do conversion on cpu
    snake_case_ : Optional[Any] = _get_ckpt_path(lowerCAmelCase_ , use_small=lowerCAmelCase_ )
    snake_case_ : Tuple = _load_model(lowerCAmelCase_ , lowerCAmelCase_ , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
    # load bark initial model
    snake_case_ : int = _bark_load_model(lowerCAmelCase_ , "cpu" , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
    if model_type == "text":
        snake_case_ : Union[str, Any] = bark_model["model"]
    if model.num_parameters(exclude_embeddings=lowerCAmelCase_ ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )
    # check if same output as the bark model
    snake_case_ : Optional[Any] = 5
    snake_case_ : Optional[int] = 1_0
    if model_type in ["text", "coarse"]:
        snake_case_ : Optional[Any] = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
        snake_case_ : str = bark_model(lowerCAmelCase_ )[0]
        snake_case_ : Tuple = model(lowerCAmelCase_ )
        # take last logits
        snake_case_ : List[str] = output_new_model_total.logits[:, [-1], :]
    else:
        # the fine model additionally takes a codebook index
        snake_case_ : Optional[int] = 3
        snake_case_ : str = 8
        snake_case_ : List[str] = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        snake_case_ : Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
        snake_case_ : Union[str, Any] = bark_model(lowerCAmelCase_ , lowerCAmelCase_ )
        snake_case_ : Optional[int] = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )
    Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
    model.save_pretrained(lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Tuple , lowerCAmelCase_: List[str] , lowerCAmelCase_: Any , lowerCAmelCase_: List[Any] , lowerCAmelCase_: int , lowerCAmelCase_: Optional[Any] , ):
    """Assemble the three converted Bark sub-models plus the Encodec codec into one BarkModel and save it.

    NOTE(review): identifiers here look machine-mangled — the body references
    ``semantic``, ``coarseAcoustic``, ``fineAcoustic``, ``codec``,
    ``bark_generation_config`` and ``bark`` that are never bound (every
    assignment targets one throwaway local), and the sub-model directories are
    never distinguished. Restore from the upstream Bark conversion script.
    """
    snake_case_ : Optional[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
    snake_case_ : Optional[Any] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
    snake_case_ : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
    snake_case_ : List[str] = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
    snake_case_ : List[Any] = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    snake_case_ : List[str] = BarkSemanticModel.from_pretrained(lowerCAmelCase_ )
    snake_case_ : Optional[Any] = BarkCoarseModel.from_pretrained(lowerCAmelCase_ )
    snake_case_ : Tuple = BarkFineModel.from_pretrained(lowerCAmelCase_ )
    snake_case_ : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    snake_case_ : Tuple = BarkConfig.from_sub_model_configs(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    snake_case_ : List[Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    snake_case_ : Optional[int] = BarkModel(lowerCAmelCase_ )
    snake_case_ : int = semantic
    snake_case_ : List[str] = coarseAcoustic
    snake_case_ : str = fineAcoustic
    snake_case_ : Optional[Any] = codec
    snake_case_ : Any = bark_generation_config
    Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
    bark.save_pretrained(lowerCAmelCase_ , repo_id=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ )
if __name__ == "__main__":
    # CLI entry point for the Bark conversion script.
    # NOTE(review): the parser and parsed args are bound to the mangled name
    # ``UpperCAmelCase`` (shadowing each other) while the calls below use
    # ``parser``/``args``, and ``load_model`` is not defined under that name
    # in this chunk — restore from the upstream script before running.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    UpperCAmelCase = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 666 | from typing import TYPE_CHECKING
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import table for the `upernet` sub-package. Fixes the mangled
# original, where the modeling entries and the final `_LazyModule` were bound
# to an unrelated name (`UpperCAmelCase`), leaving `_import_structure`
# undefined.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports
    # sub-modules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the BridgeTower configuration module.
# NOTE(review): both bindings share the mangled name ``UpperCAmelCase``, so
# the logger is immediately shadowed by the config map — the original names
# were presumably ``logger`` and a pretrained-config archive map; confirm.
UpperCAmelCase = logging.get_logger(__name__)
# Map from BridgeTower checkpoint id to its hosted config.json.
UpperCAmelCase = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration for the vision encoder of a BridgeTower model.

    Fixes the mangled original, in which ``__init__`` bound every value to a
    throwaway local instead of setting attributes on ``self`` and the
    classmethod never bound ``config_dict``.
    NOTE(review): the base class is presumably ``PretrainedConfig``; confirm.
    """

    # Keep the mangled attribute for compatibility and expose the
    # conventional ``model_type`` the config machinery checks via hasattr().
    _SCREAMING_SNAKE_CASE = "bridgetower_vision_model"
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        """Create the vision config; unrecognized kwargs go to the base class."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def UpperCAmelCase__ (cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping it from a composite ``bridgetower`` config."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # NOTE(review): the source pulls "text_config" here even though this is
        # the vision config class — preserved as-is; confirm against upstream.
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class snake_case__ ( _UpperCamelCase ):
    """Configuration for the text encoder of a BridgeTower model.

    Fixes the mangled original, in which ``__init__`` bound every value to a
    throwaway local instead of setting attributes on ``self`` and the
    classmethod never bound ``config_dict``.
    NOTE(review): the base class is presumably ``PretrainedConfig``; confirm.
    """

    # Keep the mangled attribute for compatibility and expose the
    # conventional ``model_type``.
    _SCREAMING_SNAKE_CASE = "bridgetower_text_model"
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Create the text config; unrecognized kwargs go to the base class."""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def UpperCAmelCase__ (cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this text config, unwrapping it from a composite ``bridgetower`` config."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # A full BridgeTower config nests this config under "text_config".
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict, **kwargs)
class snake_case__ ( _UpperCamelCase ):
    """Composite BridgeTower configuration (text tower + vision tower +
    cross-modal link towers).

    NOTE(review): garbled identifiers restored from the attribute names the
    original body references.  The original also defined two different
    methods under the single name ``UpperCAmelCase__`` (the second shadowed
    the first); they are restored to their canonical names.
    """

    # Original garbled attribute kept for backward compatibility.
    _SCREAMING_SNAKE_CASE : Tuple = "bridgetower"
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=7_68,
        initializer_factor=1,
        layer_norm_eps=1E-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # Legacy kwargs accepted for backward compatibility; values unused.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )

        # These class names are what the original (garbled) body references;
        # they assume the two sub-config classes above carry their canonical
        # names.
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 666 | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
# Module-level logger for this image-processor module.
# NOTE(review): garbled name — conventionally this would be `logger`; nothing
# in the visible class reads it, so the binding is left untouched.
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
    """Image processor: optional shortest-edge resize, center crop, rescale
    and normalize, plus semantic-segmentation post-processing.

    NOTE(review): this block was mechanically garbled — every parameter was
    named ``A__`` (a SyntaxError), locals replaced ``self`` assignments, and
    all six methods shared the single name ``UpperCAmelCase__`` so only the
    last survived.  Method names are restored from the calls ``preprocess``
    itself makes (``self.resize``, ``self.center_crop``, ``self.rescale``,
    ``self.normalize``).
    """

    # Original garbled attribute kept for backward compatibility; the
    # canonical attribute read by the HF image-processor machinery follows.
    _SCREAMING_SNAKE_CASE : str = ["pixel_values"]
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_56}
        # Shortest-edge sizing must not be forced square.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the image's shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline; any argument left ``None`` falls back
        to the value chosen at construction time."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image segmentation maps, optionally
        resized to ``target_sizes``.  Requires torch when resizing."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            # Fix: the original never bound the accumulator list before
            # calling .append on it.
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE_ ( u: float, p: int ) -> float:
    """Return u * (u-1) * ... * (u-p+1), the falling product used by
    Newton's forward-interpolation formula (for p <= 1 this is just u).

    Fix: the original declared both parameters under the same name
    (a SyntaxError) and referenced `u`/`temp`, which no longer existed.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactive driver for Newton's forward interpolation.

    Fix: the ``__main__`` guard below calls ``main()``, but the original def
    was garbled to the same name as the falling-product helper above (which
    it shadowed, making the helper unreachable).  Restoring ``main`` both
    satisfies the guard and un-shadows the helper.  Loop bounds and targets
    are restored from the surviving references (``y``, ``n - i``, ``value``,
    ``x[0]``, ``summ``).
    """
    n = int(input("enter the numbers of values: " ) )
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            # Placeholder fill; column 0 is overwritten below and columns
            # 1..n-1 by the forward-difference pass.
            y[i].append(j)
    # NOTE(review): the original had an unidentifiable `<local> = 0`
    # assignment here; dropped as a no-op on a discarded local.
    print("enter the values of parameters in a list: " )
    # NOTE(review): the conversion callable was garbled away — assumes the
    # x parameters are numeric; float is the safe reading.
    x = list(map(float, input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (SCREAMING_SNAKE_CASE_(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}" )


if __name__ == "__main__":
    main()
| 666 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the blenderbot subpackage.
# Fix: the final `_LazyModule(...)` call passes `_import_structure`, but the
# garbled code built the table under `UpperCAmelCase` and then repeatedly
# REBOUND that name to the optional-dependency lists, destroying the dict.
# Restored to the canonical HF lazy-module pattern.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 1 |
from typing import Any
class Node:
    """Singly-linked-list node holding one payload.

    Fix: the original bound ``data``/``next`` to throwaway locals so the
    attributes were never set, and the class name was garbled even though
    the list code below constructs ``Node(...)`` by name.
    """

    def __init__(self, data):
        self.data = data   # payload, read/written by LinkedList.swap_nodes
        self.next = None   # successor node; None terminates the list
class LinkedList:
    """Minimal singly linked list supporting head-insert, print, and
    swapping the payloads of two nodes found by value.

    Fix: the three methods were garbled to one shared name (so only the last
    survived), ``push`` never linked the new node, and ``swap_nodes``
    compared a value against itself and always returned without swapping.
    Names are restored from the call sites in the ``__main__`` block below
    (``ll.push`` / ``ll.print_list`` / ``ll.swap_nodes``).
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print node payloads space-separated, followed by a newline."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" " )
            temp = temp.next
        print()

    def push(self, new_data):
        """Insert ``new_data`` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the payloads of the first nodes holding the two given
        values; a no-op if the values are equal or either is absent."""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    # Fix: the list instance was assigned to the garbled name `UpperCAmelCase`
    # while every subsequent statement uses `ll`.
    ll = LinkedList()
    # Push 5..1 so the list prints in ascending order (head-insert reverses).
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 666 | from ...configuration_utils import PretrainedConfig
UpperCAmelCase = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class snake_case__ ( _UpperCamelCase ):
    """TAPAS configuration: BERT-style encoder hyperparameters plus the
    table-QA fine-tuning knobs (cell selection, aggregation, answer loss).

    NOTE(review): the original declared ~37 parameters all under the same
    garbled name ``A__`` (a SyntaxError) and bound values to a throwaway
    local instead of ``self``.  Parameter names are restored from the
    attribute names the body itself references; the signature defaults are
    the ones visible in the garbled signature, kept in order.
    """

    # Original garbled attribute kept for backward compatibility.
    _SCREAMING_SNAKE_CASE : Dict = "tapas"
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=10_24,
        type_vocab_sizes=[3, 2_56, 2_56, 2, 2_56, 2_56, 10],  # noqa: B006 — read-only default kept for interface compat
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        # JSON round-trips turn int keys into strings; coerce them back.
        # (Fix: the garble left `isinstance(..., A__)` and `int(A__)`.)
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case__ ( _UpperCamelCase ):
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = tempfile.mkdtemp()
snake_case_ : Tuple = 8
# DPR tok
snake_case_ : Union[str, Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case_ : str = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(A__ , exist_ok=A__ )
snake_case_ : List[str] = os.path.join(A__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
snake_case_ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : List[str] = dict(zip(A__ , range(len(A__ ) ) ) )
snake_case_ : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : Dict = {"unk_token": "<unk>"}
snake_case_ : List[Any] = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(A__ , exist_ok=A__ )
snake_case_ : Any = os.path.join(A__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Any = os.path.join(A__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A__ ) )
def UpperCAmelCase__ ( self : Dict ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase__ ( self : Dict ) -> DPRContextEncoderTokenizer:
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def UpperCAmelCase__ ( self : int ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_dummy_dataset()
snake_case_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case_ : Any = dataset
snake_case_ : List[Any] = RagRetriever(
A__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def UpperCAmelCase__ ( self : str , A__ : bool ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.get_dummy_dataset()
snake_case_ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
snake_case_ : Tuple = os.path.join(self.tmpdirname , "dataset" )
snake_case_ : Any = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
snake_case_ : List[Any] = RagRetriever(
A__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case_ : List[str] = RagRetriever(
A__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , A__ ) , )
return retriever
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
snake_case_ : str = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
snake_case_ : List[str] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
snake_case_ : Any = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(A__ , open(A__ , "wb" ) )
snake_case_ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
snake_case_ : int = RagRetriever(
A__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = 1
snake_case_ : Union[str, Any] = self.get_dummy_canonical_hf_index_retriever()
snake_case_ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = retriever.retrieve(A__ , n_docs=A__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , A__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case_ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(A__ )
snake_case_ : str = RagRetriever.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
snake_case_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : str = retriever.retrieve(A__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase__ ( self : int ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = 1
snake_case_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=A__ )
snake_case_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ : Any = retriever.retrieve(A__ , n_docs=A__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , A__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase__ ( self : Dict ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=A__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A__ )
snake_case_ : List[str] = RagRetriever.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
snake_case_ : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : Any = retriever.retrieve(A__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
snake_case_ : str = 1
snake_case_ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=A__ )
snake_case_ : Optional[int] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ : Any = retriever.retrieve(A__ , n_docs=A__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , A__ )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def UpperCAmelCase__ ( self : int ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.get_dummy_custom_hf_index_retriever(from_disk=A__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(A__ )
snake_case_ : Optional[int] = RagRetriever.from_pretrained(A__ )
self.assertIsInstance(A__ , A__ )
snake_case_ : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ : List[Any] = retriever.retrieve(A__ , n_docs=1 )
self.assertTrue(out is not None )
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = 1
snake_case_ : List[str] = self.get_dummy_legacy_index_retriever()
snake_case_ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
snake_case_ ,snake_case_ ,snake_case_ : str = retriever.retrieve(A__ , n_docs=A__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(A__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , A__ )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
        """Round-trip the legacy-index retriever through save_pretrained/from_pretrained, then retrieve once."""
        # NOTE(review): mangled bindings — `retriever` and `out` are read but never
        # assigned under those names.
        snake_case_ : Optional[Any] = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(A__ )
            snake_case_ : Optional[int] = RagRetriever.from_pretrained(A__ )
            self.assertIsInstance(A__ , A__ )
            snake_case_ : str = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            snake_case_ : List[Any] = retriever.retrieve(A__ , n_docs=1 )
            self.assertTrue(out is not None )
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """Call the retriever end-to-end: numpy outputs by default, torch tensors with return_tensors="pt"."""
        # NOTE(review): mangled bindings — `retriever`, `out`, `n_docs`,
        # `retrieved_doc_embeds` are read but never assigned under those names.
        import torch
        snake_case_ : Optional[int] = 1
        snake_case_ : Optional[int] = self.get_dummy_canonical_hf_index_retriever()
        snake_case_ : str = [[5, 7], [10, 11]]
        snake_case_ : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ : List[str] = retriever(A__ , A__ , prefix=retriever.config.generator.prefix , n_docs=A__ )
        snake_case_ ,snake_case_ ,snake_case_ : Tuple = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(A__ , A__ )
        self.assertIsInstance(A__ , A__ )
        self.assertIsInstance(A__ , np.ndarray )
        # Same call but asking for PyTorch tensors back.
        snake_case_ : Any = retriever(
            A__ , A__ , prefix=retriever.config.generator.prefix , n_docs=A__ , return_tensors="pt" , )
        snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : Any = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(A__ , torch.Tensor )
        self.assertIsInstance(A__ , torch.Tensor )
        self.assertIsInstance(A__ , torch.Tensor )
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def UpperCAmelCase__ ( self : str ) -> Any:
        """With a DPR context-encoder tokenizer set, the retriever output should also carry tokenized docs."""
        # NOTE(review): mangled bindings — `retriever` and `out` are read but never
        # assigned under those names.
        snake_case_ : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
        snake_case_ : str = 1
        snake_case_ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=A__ )
        retriever.set_ctx_encoder_tokenizer(A__ )
        snake_case_ : Optional[Any] = [[5, 7], [10, 11]]
        snake_case_ : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        snake_case_ : int = retriever(A__ , A__ , prefix=retriever.config.generator.prefix , n_docs=A__ )
        self.assertEqual(
            len(A__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , A__ ) # check for doc token related keys in dictionary.
| 666 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class snake_case__ ( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder exposing a flat {"content": string} feature.

    NOTE(review): method/parameter names were machine-mangled — the two builder
    hooks below share one name and declare duplicate `A__` parameters, and the
    last one reads `pipeline` which is never bound; compare against the original
    `_split_generators`/`_build_pcollection` before running.
    """
    def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
        """Dataset info: a single string column named "content"."""
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=A__ , )
    def UpperCAmelCase__ ( self : Optional[Any] , A__ : str , A__ : str ) -> Optional[int]:
        """One TRAIN split fed by the dummy example helper."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def UpperCAmelCase__ ( self : int , A__ : Optional[int] , A__ : Dict ) -> Optional[Any]:
        """Build the Beam PCollection from the in-memory examples."""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
class snake_case__ ( datasets.BeamBasedBuilder ):
    """Beam-based builder with a nested {"a": Sequence({"b": string})} feature.

    NOTE(review): same mangling caveats as the flat builder above (duplicate
    `A__` parameters, unbound `pipeline`).
    """
    def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
        """Dataset info: nested sequence-of-struct feature."""
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=A__ , )
    def UpperCAmelCase__ ( self : Any , A__ : List[str] , A__ : str ) -> Optional[int]:
        """One TRAIN split fed by the nested example helper."""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def UpperCAmelCase__ ( self : List[Any] , A__ : List[str] , A__ : Optional[int] ) -> List[str]:
        """Build the Beam PCollection from the in-memory examples."""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
def SCREAMING_SNAKE_CASE_ ( ):
    """Return dummy (key, example) pairs, each example a flat {"content": str} dict."""
    samples = ["foo", "bar", "foobar"]
    pairs = []
    for index, text in enumerate(samples):
        pairs.append((index, {"content": text}))
    return pairs
def SCREAMING_SNAKE_CASE_ ( ):
    """Return dummy (key, example) pairs, each example a nested {"a": {"b": [str]}} dict."""
    out = []
    index = 0
    for text in ("foo", "bar", "foobar"):
        out.append((index, {"a": {"b": [text]}}))
        index += 1
    return out
class snake_case__ ( _UpperCamelCase ):
    """End-to-end tests for the Beam-based builders with the local DirectRunner.

    NOTE(review): local names were machine-mangled — `builder`, `dset`,
    `expected_num_examples`, `DummyBeamDataset`, `NestedBeamDataset` are read
    below but only `snake_case_` is ever assigned; restore the original
    bindings before running.
    """
    @require_beam
    def UpperCAmelCase__ ( self : str ) -> List[str]:
        """download_and_prepare with DirectRunner writes one arrow shard plus dataset_info.json."""
        snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : Dict = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            snake_case_ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """Patching WriteToParquet with num_shards=2 should produce sharded arrow files."""
        import apache_beam as beam
        snake_case_ : Tuple = beam.io.parquetio.WriteToParquet
        snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : List[Any] = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                snake_case_ : int = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
                # NOTE(review): both existence checks below look at shard 00000 — the
                # second was presumably meant to check shard 00001.
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            snake_case_ : Optional[Any] = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
        """Without a beam_runner, download_and_prepare must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : Tuple = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
        """Same end-to-end check as the first test, but for the nested-feature builder."""
        snake_case_ : Optional[int] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            snake_case_ : List[str] = NestedBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            snake_case_ : int = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , A__ )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
| 666 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class snake_case__ ( unittest.TestCase ):
    """Config/input factory and per-model checks for the Flax ViT tests.

    NOTE(review): the __init__ signature was machine-mangled — every parameter
    is named `A__` (a SyntaxError) while the body reads `parent`, `batch_size`,
    `image_size`, ...; restore the original parameter list before running.
    """
    def __init__( self : List[str] , A__ : List[Any] , A__ : Any=13 , A__ : Union[str, Any]=30 , A__ : Optional[Any]=2 , A__ : Union[str, Any]=3 , A__ : int=True , A__ : List[str]=True , A__ : Dict=32 , A__ : Any=5 , A__ : Union[str, Any]=4 , A__ : Tuple=37 , A__ : Optional[int]="gelu" , A__ : Optional[int]=0.1 , A__ : Optional[int]=0.1 , A__ : List[Any]=10 , A__ : Union[str, Any]=0.02 , ) -> Union[str, Any]:
        """Store the hyper-parameters used to build small ViT configs for tests."""
        snake_case_ : int = parent
        snake_case_ : Any = batch_size
        snake_case_ : Tuple = image_size
        snake_case_ : Union[str, Any] = patch_size
        snake_case_ : int = num_channels
        snake_case_ : str = is_training
        snake_case_ : str = use_labels
        snake_case_ : int = hidden_size
        snake_case_ : Union[str, Any] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : Dict = intermediate_size
        snake_case_ : Union[str, Any] = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : str = attention_probs_dropout_prob
        snake_case_ : Tuple = type_sequence_label_size
        snake_case_ : Dict = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case_ : Optional[Any] = (image_size // patch_size) ** 2
        snake_case_ : int = num_patches + 1
    def UpperCAmelCase__ ( self : Tuple ) -> Any:
        """Build a small ViTConfig plus a random pixel_values tensor."""
        snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Optional[int] = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )
        return config, pixel_values
    def UpperCAmelCase__ ( self : int , A__ : List[Any] , A__ : str ) -> Dict:
        """Forward pass through FlaxViTModel; check the hidden-state shape."""
        snake_case_ : Union[str, Any] = FlaxViTModel(config=A__ )
        snake_case_ : Union[str, Any] = model(A__ )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        snake_case_ : str = (self.image_size, self.image_size)
        snake_case_ : Any = (self.patch_size, self.patch_size)
        snake_case_ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def UpperCAmelCase__ ( self : Union[str, Any] , A__ : Optional[int] , A__ : List[Any] ) -> str:
        """Forward pass through FlaxViTForImageClassification, incl. a greyscale variant."""
        snake_case_ : Any = self.type_sequence_label_size
        snake_case_ : Any = FlaxViTForImageClassification(config=A__ )
        snake_case_ : List[Any] = model(A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case_ : str = 1
        snake_case_ : Tuple = FlaxViTForImageClassification(A__ )
        snake_case_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case_ : Optional[int] = model(A__ )
    def UpperCAmelCase__ ( self : Any ) -> Any:
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape the common tests expect."""
        snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ) ,(
                snake_case_
            ) ,
        ) : Tuple = config_and_inputs
        snake_case_ : Optional[int] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """Flax ViT model tests built on the shared FlaxModelTesterMixin harness.

    NOTE(review): names were machine-mangled — `FlaxViTModelTester` below does
    not match the tester class defined above, and the inner `model_jitted`
    declares two parameters named `A__` (a SyntaxError).
    """
    _SCREAMING_SNAKE_CASE : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def UpperCAmelCase__ ( self : Tuple ) -> None:
        """Set up the model tester and the (text-free) config tester."""
        snake_case_ : List[Any] = FlaxViTModelTester(self )
        snake_case_ : Tuple = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
    def UpperCAmelCase__ ( self : str ) -> List[str]:
        """Run the common ViTConfig checks."""
        self.config_tester.run_common_tests()
    def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
        """Base-model forward-shape check."""
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
        """Image-classification head check."""
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )
    def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
        """Forward signature must start with pixel_values."""
        snake_case_ ,snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(A__ )
            snake_case_ : str = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Optional[int] = [*signature.parameters.keys()]
            snake_case_ : Dict = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , A__ )
    def UpperCAmelCase__ ( self : int ) -> int:
        """jit-compiled and eager forward passes must agree in output shapes."""
        snake_case_ ,snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                snake_case_ : Tuple = self._prepare_for_class(A__ , A__ )
                snake_case_ : int = model_class(A__ )
                @jax.jit
                def model_jitted(A__ : Any , **A__ : Union[str, Any] ):
                    return model(pixel_values=A__ , **A__ )
                with self.subTest("JIT Enabled" ):
                    snake_case_ : Dict = model_jitted(**A__ ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        snake_case_ : List[str] = model_jitted(**A__ ).to_tuple()
                self.assertEqual(len(A__ ) , len(A__ ) )
                for jitted_output, output in zip(A__ , A__ ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
        """Smoke-test loading the pretrained checkpoint and running a dummy image."""
        for model_class_name in self.all_model_classes:
            snake_case_ : Any = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            snake_case_ : List[Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
            self.assertIsNotNone(A__ )
| 666 | import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( monkeypatch ):
    """Reset the deprecation-warning registry so every test sees fresh warnings.

    Bug fix: the mangled original named the parameter `lowerCAmelCase_` while the
    body read `monkeypatch` (NameError) — and pytest injects fixtures by
    parameter name, so the parameter must literally be called `monkeypatch`.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def SCREAMING_SNAKE_CASE_ ( monkeypatch ):
    """Replace `datasets.inspect.huggingface_hub` with a stub exposing four metrics.

    Bug fixes vs the mangled original: the parameter must be named `monkeypatch`
    for pytest to inject it (the body already read that name); the two helper
    classes were both renamed to `snake_case__` and referenced undefined
    `MetricMock`/`HfhMock` names; the metric list was stored on
    `_SCREAMING_SNAKE_CASE` but read back as `self._metrics`.

    NOTE(review): the accessor name `list_metrics` is what `datasets.inspect`
    calls on the hub object — confirm against the datasets version in use.
    """
    class MetricMock:
        def __init__( self , metric_id ):
            # Mirror the only attribute the hub metric objects expose here.
            snake_case_ = metric_id
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def list_metrics( self ):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def SCREAMING_SNAKE_CASE_ ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    """Each deprecated metric entry point must emit the evaluate-migration warning.

    Bug fixes vs the mangled original: all five parameters shared one name
    (a SyntaxError) while the body read `func`/`args`/`tmp_path`, and
    `pytest.warns` was handed an undefined name for the warning class.

    NOTE(review): the two mock_* parameters must match the pytest-fixture names
    defined earlier in this file (both also mangled to SCREAMING_SNAKE_CASE_),
    and FutureWarning is assumed from the match URL — confirm both against the
    original datasets test.
    """
    # Substitute the real tmp dir for the "tmp_path" placeholder in the params.
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 666 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI for converting an original ControlNet checkpoint into diffusers format.
    # NOTE(review): names were machine-mangled — the parser is bound to
    # `UpperCAmelCase` but used as `parser`, the parsed namespace as `args`, and
    # the result as `controlnet`; restore the original bindings before running.
    UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=5_1_2,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
# Strict "True"/"False" string-to-bool parser for the --use_linear_projection flag.
# NOTE(review): mangled — registered below as `parse_bool` but defined under a
# different name with a parameter that the body never reads (`string`); also,
# because this def dedents out of the __main__ block, every 4-space-indented
# statement after the `raise` is syntactically swallowed into this function.
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Tuple ):
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}" )
    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    UpperCAmelCase = parser.parse_args()
    UpperCAmelCase = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 666 | from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ):
    """Return the leftmost insertion index for `item` in `sorted_collection` (bisect_left).

    Bug fix: the mangled original declared all four parameters with the same
    name `lowerCAmelCase_` (a SyntaxError) while the body read
    `sorted_collection`/`item`/`lo`/`hi`; restore the real parameter names.

    :param sorted_collection: ascending-sorted list to search
    :param item: value to place
    :param lo: inclusive lower search bound
    :param hi: exclusive upper search bound; -1 means "end of list"
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        # Overflow-safe midpoint form kept from the original.
        snake_case_ = lo + (hi - lo) // 2
        if sorted_collection[snake_case_] < item:
            lo = snake_case_ + 1
        else:
            hi = snake_case_
    return lo
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ):
    """Return the rightmost insertion index for `item` in `sorted_collection` (bisect_right).

    Bug fix: the mangled original declared all four parameters with the same
    name `lowerCAmelCase_` (a SyntaxError) while the body read
    `sorted_collection`/`item`/`lo`/`hi`; restore the real parameter names.

    :param hi: exclusive upper search bound; -1 means "end of list"
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        snake_case_ = lo + (hi - lo) // 2
        # `<=` (vs `<` in bisect_left) pushes equal items to the left partition.
        if sorted_collection[snake_case_] <= item:
            lo = snake_case_ + 1
        else:
            hi = snake_case_
    return lo
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ):
    """Insert `item` at its leftmost sorted position, in place (insort_left).

    Bug fixes: duplicate `lowerCAmelCase_` parameter names (a SyntaxError) and a
    call to an undefined bare `bisect_left`; delegate to the stdlib `bisect`
    module that this file imports.  `hi = -1` means "end of list".
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_left(sorted_collection , item , lo , hi ) , item )
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ):
    """Insert `item` at its rightmost sorted position, in place (insort_right).

    Bug fixes: duplicate `lowerCAmelCase_` parameter names (a SyntaxError) and a
    call to an undefined bare `bisect_right`; delegate to the stdlib `bisect`
    module that this file imports.  `hi = -1` means "end of list".
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_right(sorted_collection , item , lo , hi ) , item )
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int ):
    """Iterative binary search: return the index of `item`, or None when absent.

    Bug fix: the mangled original declared both parameters with the same name
    `lowerCAmelCase_` (a SyntaxError) while the body read
    `sorted_collection`/`item`; restore the real parameter names.
    """
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    # Exhausted the search interval without a match.
    return None
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int ):
    """Binary search via the stdlib: return the index of `item`, or None when absent.

    Bug fix: the mangled original declared both parameters with the same name
    `lowerCAmelCase_` (a SyntaxError) while the body read
    `sorted_collection`/`item`; restore the real parameter names.
    """
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def SCREAMING_SNAKE_CASE_ ( sorted_collection: list[int] , item: int , left: int , right: int ):
    """Recursive binary search on sorted_collection[left:right+1]; None when absent.

    Bug fixes: the mangled original declared all four parameters with the same
    name `lowerCAmelCase_` (a SyntaxError) and recursed into an undefined
    `binary_search_by_recursion`; recurse through a local helper instead so the
    function does not depend on its own module-level (and mangled) name.
    """
    def _search(lo: int , hi: int ):
        if hi < lo:
            return None
        midpoint = lo + (hi - lo) // 2
        if sorted_collection[midpoint] == item:
            return midpoint
        elif sorted_collection[midpoint] > item:
            return _search(lo , midpoint - 1 )
        else:
            return _search(midpoint + 1 , hi )
    return _search(left , right )
if __name__ == "__main__":
    # Interactive demo: read a comma-separated list and a target, then search.
    # NOTE(review): names were machine-mangled — every value is bound to
    # `UpperCAmelCase` but read back as `user_input`/`collection`/`target`/
    # `result`, and `binary_search` is not defined under that name in this file.
    UpperCAmelCase = input("Enter numbers separated by comma:\n").strip()
    UpperCAmelCase = sorted(int(item) for item in user_input.split(","))
    UpperCAmelCase = int(input("Enter a single number to be found in the list:\n"))
    UpperCAmelCase = binary_search(collection, target)
    if result is None:
        print(F"{target} was not found in {collection}.")
    else:
        print(F"{target} was found at position {result} in {collection}.")
| 666 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the CANINE model package: build an import map,
# guard the torch-only pieces, and swap the module for a _LazyModule at runtime.
# NOTE(review): mangled — the import map is bound to `SCREAMING_SNAKE_CASE__`
# but the final _LazyModule call reads `_import_structure`, which is never
# defined under that name.
SCREAMING_SNAKE_CASE__ : Dict = {
    """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
    """tokenization_canine""": ["""CanineTokenizer"""],
}
# The modeling objects are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : str = [
        """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CanineForMultipleChoice""",
        """CanineForQuestionAnswering""",
        """CanineForSequenceClassification""",
        """CanineForTokenClassification""",
        """CanineLayer""",
        """CanineModel""",
        """CaninePreTrainedModel""",
        """load_tf_weights_in_canine""",
    ]
# Static type checkers see real imports; at runtime the names resolve lazily.
if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 0 | import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__ ( _UpperCamelCase ):
    """Unconditional latent-diffusion pipeline: DDIM denoising in VQ-VAE latent space.

    NOTE(review): `__call__`'s parameters were machine-mangled — several share
    the name `A__` (a SyntaxError) while the body reads `batch_size`, `eta`,
    `latents`, `output_type`, `return_dict`, ...; restore the original
    signature before running.
    """
    def __init__( self : Union[str, Any] , A__ : VQModel , A__ : UNetaDModel , A__ : DDIMScheduler ) -> List[Any]:
        """Register the VQ-VAE, UNet and DDIM scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(vqvae=A__ , unet=A__ , scheduler=A__ )
    @torch.no_grad()
    def __call__( self : str , A__ : int = 1 , A__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A__ : float = 0.0 , A__ : int = 50 , A__ : Optional[str] = "pil" , A__ : bool = True , **A__ : Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample random latents, run the DDIM loop, decode with the VQ-VAE.

        Returns an ImagePipelineOutput (or a plain tuple when return_dict is
        False) holding the generated images.
        """
        # Start from Gaussian latents shaped (batch, channels, sample, sample).
        snake_case_ : Optional[int] = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=A__ , )
        snake_case_ : List[Any] = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        snake_case_ : Any = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(A__ )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        snake_case_ : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        snake_case_ : List[Any] = {}
        if accepts_eta:
            snake_case_ : int = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            snake_case_ : Union[str, Any] = self.scheduler.scale_model_input(A__ , A__ )
            # predict the noise residual
            snake_case_ : Dict = self.unet(A__ , A__ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            snake_case_ : Union[str, Any] = self.scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
        # decode the image latents with the VAE
        snake_case_ : int = self.vqvae.decode(A__ ).sample
        # Map from [-1, 1] to [0, 1] and move channels last for image output.
        snake_case_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        snake_case_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case_ : Optional[int] = self.numpy_to_pil(A__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=A__ )
| 666 | 0 |
from PIL import Image
def _A ( _lowercase ) -> Image:
    """Binarize a greyscale PIL image in place at its mean pixel value.

    Pixels strictly above the mean become 255, all others 0; returns the image.

    Bug fixes vs the mangled original: both loops ranged over the image object
    itself (`range(_lowercase)`, a TypeError) and the body read an undefined
    name `image` instead of the `_lowercase` parameter.

    NOTE(review): assumes a single-band ("L") image so each pixel is an int —
    callers convert with .convert("L") first.
    """
    width, height = _lowercase.size
    mean = 0
    pixels = _lowercase.load()
    # First pass: accumulate the (integer) mean intensity.
    for i in range(height ):
        for j in range(width ):
            mean += pixels[j, i]
    mean //= width * height
    # Second pass: threshold every pixel against the mean, writing in place.
    for j in range(height ):
        for i in range(width ):
            pixels[i, j] = 2_55 if pixels[i, j] > mean else 0
    return _lowercase
if __name__ == "__main__":
    # Demo: threshold a greyscale image and save the result.
    # NOTE(review): mangled — the result is bound to `__snake_case` but saved
    # via an undefined name `image`, and `mean_threshold` is not the name the
    # function above was given in this file.
    __snake_case = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
    image.save('''output_image_path''')
| 1 | from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
    """Return the first `lowerCAmelCase_` digits of pi via the Chudnovsky algorithm.

    Example: SCREAMING_SNAKE_CASE_(10) -> "3.14159265".

    :raises TypeError: when the precision is not an int
    :raises ValueError: when the precision is < 1

    Bug fixes vs the mangled original: the Decimal context precision was never
    set, the loop ran to `precision` instead of ceil(precision / 14) iterations,
    and factorial() was fed the precision instead of the loop index k — all of
    which produced wrong digits (or NameErrors on the unbound locals).
    """
    if not isinstance(lowerCAmelCase_ , int ):
        raise TypeError("Undefined for non-integers" )
    elif lowerCAmelCase_ < 1:
        raise ValueError("Undefined for non-natural numbers" )
    # All Decimal arithmetic below rounds to the requested number of digits.
    getcontext().prec = lowerCAmelCase_
    # Each Chudnovsky term contributes roughly 14 digits.
    num_iterations = ceil(lowerCAmelCase_ / 1_4 )
    constant_term = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
    exponential_term = 1
    linear_term = 1_3_5_9_1_4_0_9
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4_5_1_4_0_1_3_4
        exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    # Drop the last (possibly rounded) digit so every returned digit is exact.
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    # Demo: print the first 50 digits of pi.
    # NOTE(review): mangled — the constant is bound to `UpperCAmelCase` but the
    # f-string reads `n` and calls `pi`, neither of which exists under those names.
    UpperCAmelCase = 5_0
    print(F"The first {n} digits of pi is: {pi(n)}")
| 666 | 0 |
import collections
import os
import re
from pathlib import Path
# Root path plus the regexes used to parse lazy-init __init__.py files.
# NOTE(review): every constant below was machine-mangled onto the single name
# `UpperCAmelCase_` (each assignment clobbers the previous one), while the
# parser functions later read `_re_backend`, `_re_test_backend`,
# `_re_one_line_import_struct`, etc.; restore the distinct names before use.
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any:
    """Extract a sorted "_and_"-joined backend key from an is_xxx_available() guard line.

    Returns None when the line is not a backend guard.

    Bug fix: the mangled original assigned the matched backend list to `_A` but
    then sorted/joined an undefined name `backends`.
    Relies on the module-level regexes `_re_test_backend` / `_re_backend`.
    """
    if _re_test_backend.search(_snake_case ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(_snake_case )]
    backends.sort()
    return "_and_".join(backends )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any:
    """Parse a lazy-init __init__.py into its two halves.

    Returns (import_dict_objects, type_hint_objects) — each maps a backend key
    ("none" for unguarded objects, otherwise e.g. "torch") to the object names
    it exposes — or None when the file is not a lazy-init module.

    NOTE(review): local names were machine-mangled — every assignment targets
    `_A` while subsequent lines read `lines`, `line_index`, `objects`, `line`,
    `imports`, `single_line_import_search`, `backend`, `import_dict_objects`,
    `type_hint_objects`; restore the real bindings before running.
    """
    with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        _A = f.readlines()
    _A = 0
    # Advance to the start of the `_import_structure = {` declaration.
    while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(_snake_case ):
        return None
    # First grab the objects without a specific backend in _import_structure
    _A = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        _A = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(_snake_case ):
            _A = _re_one_line_import_struct.search(_snake_case ).groups()[0]
            _A = re.findall(r'''\[([^\]]+)\]''' , _snake_case )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        _A = _re_import_struct_key_value.search(_snake_case )
        if single_line_import_search is not None:
            _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
            objects.extend(_snake_case )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1
    _A = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        _A = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _A = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _A = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                _A = lines[line_index]
                if _re_import_struct_add_one.search(_snake_case ) is not None:
                    objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
                elif _re_import_struct_add_many.search(_snake_case ) is not None:
                    _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
                    _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
                    objects.extend(_snake_case )
                elif _re_between_brackets.search(_snake_case ) is not None:
                    _A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
                    _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
                    objects.extend(_snake_case )
                elif _re_quote_object.search(_snake_case ) is not None:
                    objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            _A = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    _A = []
    while (
        line_index < len(_snake_case )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        _A = lines[line_index]
        _A = _re_import.search(_snake_case )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    _A = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(_snake_case ):
        # If the line is an if is_backend_available, we grab all objects associated.
        _A = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _A = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _A = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                _A = lines[line_index]
                _A = _re_import.search(_snake_case )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            _A = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( import_dict_objects , type_hint_objects ) -> list:
    """Compare the two halves of an `__init__.py` (the `_import_structure`
    dict vs. the TYPE_CHECKING imports) and return a list of error strings.

    The transcription declared both parameters with the same name
    (`_snake_case`, a SyntaxError) and collapsed every local into `_A`;
    the intended names are recovered from the references in the body.
    """

    def find_duplicates(seq ):
        # Objects listed more than once within a single backend bucket.
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    # The backend buckets must appear in the same order on both sides.
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        # Compare as sets so ordering differences are not reported as errors.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT." )
    return errors
def SCREAMING_SNAKE_CASE_ ( ):
    """Walk every package and validate each `__init__.py` with
    `parse_init`/`analyze_results`; raise ValueError listing all failures.

    Locals were collapsed into `_A` by the transcription; they are restored
    from the names referenced later in the body.
    """
    failures = []
    # NOTE(review): `_snake_case` is not defined at module level here; it
    # presumably names the repository-root path constant -- confirm.
    for root, _, files in os.walk(_snake_case ):
        if "__init__.py" in files:
            # `root` is otherwise unused, so it is clearly the intended join base.
            fname = os.path.join(root , '''__init__.py''' )
            # NOTE(review): `parse_init`/`analyze_results` are not defined under
            # these names in this transcription -- confirm the intended helpers.
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def SCREAMING_SNAKE_CASE_ ( ):
    """Collect the dotted names of all submodules found on disk.

    Fixes: the original removed entries from `directories` while iterating
    it directly (os.walk pruning must mutate the live list, but iterating it
    at the same time skips the entry after each removal); locals collapsed
    into `_A` are restored from later references; per-directory joins use the
    `root` of the walk (`path`), which was otherwise unused.
    """
    submodules = []
    # NOTE(review): `_snake_case` presumably names the package-root path
    # constant -- confirm.
    for path, directories, files in os.walk(_snake_case ):
        # Iterate a snapshot; removals below still prune the walk via the
        # live `directories` list.
        for folder in list(directories ):
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(_snake_case ) )
            submodule = short_path.replace(os.path.sep , '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(_snake_case ) )
            submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            # Only keep top-level modules here; packages were added above.
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
# Submodules that are intentionally not exposed through `_import_structure`
# and must be skipped by the registration check below.
UpperCAmelCase_ = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
    """models.esm.openfold_utils""",
]
def SCREAMING_SNAKE_CASE_ ( ):
    """Check that every submodule on disk is registered in the package's
    `_import_structure`; raise ValueError listing any that are missing.

    Locals collapsed into `_A` are restored from later references, and the
    "registed" typo in the user-facing error message is corrected.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    # NOTE(review): `_snake_case` presumably names the repo/package path
    # constant -- confirm.
    transformers = direct_transformers_import(_snake_case )
    import_structure_keys = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , init_content ) ) )
    # NOTE(review): `get_transformers_submodules` is not defined under that
    # name in this transcription -- confirm the intended helper.
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F'''{list_of_modules}\n'''
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
    # Entry point: validate every __init__.py, then the submodule registry.
    # NOTE(review): `check_all_inits`/`check_submodules` are not defined under
    # these names in this transcription (the checkers above are all named
    # `SCREAMING_SNAKE_CASE_`); confirm the intended entry points.
    check_all_inits()
    check_submodules()
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int = 1_0_0_0 ):
    """Project Euler 25: return the index of the first Fibonacci number whose
    decimal representation contains `lowerCAmelCase_` digits.

    The transcription annotated a tuple assignment (a SyntaxError) and
    counted the digits of the *argument* instead of the Fibonacci term; this
    version keeps the original loop structure (first term inspected is F(3)).
    """
    fib_prev, fib_curr = 1, 1  # F(1), F(2)
    index = 2
    while True:
        index += 1
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        # Digit counts grow by at most one per step, so equality is reached.
        if len(str(fib_curr ) ) == lowerCAmelCase_:
            return index
if __name__ == "__main__":
    # Read the target digit count from stdin and print the answer.
    # NOTE(review): `solution` is not defined in this module (the function
    # above is named `SCREAMING_SNAKE_CASE_`); confirm the intended entry point.
    print(solution(int(str(input()).strip())))
| 666 | 0 |
'''simple docstring'''
def A_( A : int = 5000_0000):
    """Project Euler 87: count the integers below `A` expressible as
    p**2 + q**3 + r**4 with p, q, r prime."""
    ret = set()
    # Largest usable square base: p^2 + 2^3 + 2^4 < A  =>  p^2 < A - 24.
    prime_square_limit = int((A - 24) ** (1 / 2))
    # Odd-only sieve of Eratosthenes; 2 is added explicitly.
    primes = set(range(3 , prime_square_limit + 1 , 2))
    primes.add(2)
    for p in range(3 , prime_square_limit + 1 , 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p)))
    # Iterate the primes in ascending order: the original iterated the *set*
    # directly, so the early `break`s below could fire part-way through an
    # arbitrary ordering and silently skip valid sums.
    ordered_primes = sorted(primes)
    for prime_1 in ordered_primes:
        square = prime_1 * prime_1
        for prime_2 in ordered_primes:
            cube = prime_2 * prime_2 * prime_2
            # Smallest fourth power is 2**4 == 16.
            if square + cube >= A - 16:
                break
            for prime_3 in ordered_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= A:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the function
    # above is named `A_`); this guard would raise NameError when executed.
    print(f"""{solution() = }""")
| 3 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( nums , left , right ):
    """Divide-and-conquer maximum of `nums[left:right + 1]`.

    The transcription declared all three parameters with the same name
    (a SyntaxError) and recursed via an undefined `find_max`; the parameters
    are restored from the body and the recursion now targets this function.

    Raises:
        ValueError: if `nums` is empty.
        IndexError: if `left`/`right` fall outside the valid index range.
    """
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = SCREAMING_SNAKE_CASE_(nums , left , mid )  # max in range [left, mid]
    right_max = SCREAMING_SNAKE_CASE_(nums , mid + 1 , right )  # max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
    # Run the module doctests (verbose) when executed directly.
    import doctest
    doctest.testmod(verbose=True)
| 666 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def _SCREAMING_SNAKE_CASE (a: int , b: int , c: int ):
    """Return both roots of a*x^2 + b*x + c = 0.

    Real roots are returned as plain floats, complex roots as complex
    numbers. The transcription declared every parameter as `_UpperCAmelCase`
    (a SyntaxError) while the body read `a`, `b`, `c`; the parameters are
    restored to match the body.

    Raises:
        ValueError: if `a` is zero (not a quadratic).
    """
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.' )
    delta = b * b - 4 * a * c
    # cmath.sqrt handles a negative discriminant, yielding complex roots.
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def _SCREAMING_SNAKE_CASE ():
    # Demo entry point for the quadratic solver.
    # NOTE(review): `quadratic_roots` is not defined in this module (the
    # solver above is also named `_SCREAMING_SNAKE_CASE`), the tuple target
    # reuses one name, and the f-string reads an undefined `solutiona` --
    # this function would raise NameError as written; confirm against the
    # original source.
    lowerCAmelCase ,lowerCAmelCase = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here (both functions above are named
    # `_SCREAMING_SNAKE_CASE`); running this file would raise NameError.
    main()
| 4 | import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# NOTE(review): the transcription collapsed several distinct module constants
# (logger, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) into one name `UpperCAmelCase`, so
# each assignment below shadows the previous one -- confirm the intended names.
UpperCAmelCase = logging.get_logger(__name__)
# Expected vocabulary/merges/tokenizer file names for RoBERTa checkpoints.
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# Download URLs of the tokenizer resources for each pretrained checkpoint.
UpperCAmelCase = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
UpperCAmelCase = {
    "roberta-base": 5_1_2,
    "roberta-large": 5_1_2,
    "roberta-large-mnli": 5_1_2,
    "distilroberta-base": 5_1_2,
    "roberta-base-openai-detector": 5_1_2,
    "roberta-large-openai-detector": 5_1_2,
}
class snake_case__ ( _UpperCamelCase ):
    """Fast (tokenizers-backed) RoBERTa tokenizer.

    NOTE(review): the transcription collapsed the distinct class attributes
    and method names into repeated identifiers (every attribute is
    `_SCREAMING_SNAKE_CASE`, every method `UpperCAmelCase__`, every local
    `snake_case_`), so later definitions shadow earlier ones and many locals
    referenced below (`pre_tok_state`, `state`, `value`, ...) are unbound.
    The `@mask_token.setter` decorator also refers to a property name that no
    longer exists. The code is kept byte-identical; confirm against the
    original `RobertaTokenizerFast` before relying on it.
    """
    _SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE : int = ["input_ids", "attention_mask"]
    _SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer
    def __init__( self : Optional[int] , A__ : List[Any]=None , A__ : Optional[int]=None , A__ : List[str]=None , A__ : Dict="replace" , A__ : List[str]="<s>" , A__ : Optional[Any]="</s>" , A__ : List[str]="</s>" , A__ : List[Any]="<s>" , A__ : int="<unk>" , A__ : int="<pad>" , A__ : List[Any]="<mask>" , A__ : Any=False , A__ : Optional[int]=True , **A__ : Union[str, Any] , ) -> int:
        """Build the fast tokenizer and sync `add_prefix_space`/`trim_offsets`
        into the backend pre-tokenizer and post-processor state."""
        super().__init__(
            A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
        snake_case_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , A__ ) != add_prefix_space:
            # Rebuild the pre-tokenizer with the requested add_prefix_space.
            snake_case_ : List[Any] = getattr(A__ , pre_tok_state.pop("type" ) )
            snake_case_ : Any = add_prefix_space
            snake_case_ : List[Any] = pre_tok_class(**A__ )
        snake_case_ : Optional[int] = add_prefix_space
        snake_case_ : List[str] = "post_processor"
        snake_case_ : Tuple = getattr(self.backend_tokenizer , A__ , A__ )
        if tokenizer_component_instance:
            snake_case_ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case_ : str = tuple(state["sep"] )
            if "cls" in state:
                snake_case_ : Tuple = tuple(state["cls"] )
            snake_case_ : Tuple = False
            if state.get("add_prefix_space" , A__ ) != add_prefix_space:
                snake_case_ : Optional[Any] = add_prefix_space
                snake_case_ : str = True
            if state.get("trim_offsets" , A__ ) != trim_offsets:
                snake_case_ : Optional[int] = trim_offsets
                snake_case_ : List[Any] = True
            if changes_to_apply:
                # Re-instantiate the post-processor with the updated state.
                snake_case_ : int = getattr(A__ , state.pop("type" ) )
                snake_case_ : List[Any] = component_class(**A__ )
                setattr(self.backend_tokenizer , A__ , A__ )
    @property
    def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
        """Return the mask token string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def UpperCAmelCase__ ( self : Tuple , A__ : Dict ) -> Union[str, Any]:
        """Set the mask token, making it lstrip-aware like RoBERTa expects."""
        snake_case_ : Any = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
        snake_case_ : Any = value
    def UpperCAmelCase__ ( self : int , *A__ : Optional[Any] , **A__ : int ) -> BatchEncoding:
        """Batch-encode, refusing pretokenized input unless add_prefix_space is set."""
        snake_case_ : Optional[Any] = kwargs.get("is_split_into_words" , A__ )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*A__ , **A__ )
    def UpperCAmelCase__ ( self : Union[str, Any] , *A__ : Any , **A__ : List[Any] ) -> BatchEncoding:
        """Encode a single example, with the same pretokenized-input guard."""
        snake_case_ : Optional[int] = kwargs.get("is_split_into_words" , A__ )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*A__ , **A__ )
    def UpperCAmelCase__ ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        snake_case_ : Optional[Any] = self._tokenizer.model.save(A__ , name=A__ )
        return tuple(A__ )
    def UpperCAmelCase__ ( self : int , A__ : List[str] , A__ : Union[str, Any]=None ) -> Any:
        """Build model inputs as <s> A </s> (</s> B </s> for pairs)."""
        snake_case_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def UpperCAmelCase__ ( self : Dict , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
        """RoBERTa does not use token type ids; return the all-zero mask."""
        snake_case_ : str = [self.sep_token_id]
        snake_case_ : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 666 | 0 |
'''simple docstring'''
def A (number :int ):
    """Return True iff `number` is a perfect number (equals the sum of its
    proper divisors).

    The transcription named the parameter `__lowerCamelCase` while the body
    read `number`; the parameter is renamed to match the body. Proper
    divisors never exceed number // 2, so the range stops there.
    """
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    # NOTE(review): the input is bound to `_lowercase`, but the line below
    # reads `number` and calls `perfect`, neither of which is defined in this
    # module -- confirm against the original source.
    _lowercase = int(input("""Enter number: """).strip())
    print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 5 | from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
    """Integration test for the TF XLM-RoBERTa base checkpoint.

    NOTE(review): the transcription collapsed all locals into `snake_case_`,
    so `model`, `output` and `expected_slice` below are unbound, and the
    dtypes `tf.intaa`/`tf.floataa` look like mangled `tf.int32`/`tf.float32`
    -- confirm against the original test before running.
    """
    @slow
    def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
        """Check output shape and a 3x3 slice of the last hidden state."""
        snake_case_ : Dict = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
        snake_case_ : Any = {
            "input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
        }
        snake_case_ : List[str] = model(A__ )["last_hidden_state"]
        snake_case_ : str = tf.TensorShape((1, 6, 7_68) )
        self.assertEqual(output.shape , A__ )
        # compare the actual values for a slice.
        snake_case_ : List[str] = tf.convert_to_tensor(
            [
                [
                    [0.068_1762, 0.1089_4451, 0.0677_2504],
                    [-0.0642_3668, 0.0236_6615, 0.0432_9344],
                    [-0.0605_7295, 0.0997_4135, -0.0007_0584],
                ]
            ] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 666 | 0 |
import os
import numpy
import onnx
def SCREAMING_SNAKE_CASE__ ( a , b ):
    """Compare two tensor protos for equality while ignoring their names.

    The names are temporarily blanked so only dtype/shape/data take part in
    the comparison, then restored before returning. The transcription
    declared both parameters as `UpperCamelCase__` (a SyntaxError) while the
    body compared `a == b`; the parameters are restored to `a`/`b`.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    # Restore the original names so the inputs are left untouched.
    a.name = name_a
    b.name = name_b
    return res
def SCREAMING_SNAKE_CASE__ ( node_proto , name , new_name ):
    """Replace every input of `node_proto` equal to `name` by `new_name`,
    recursing into the subgraphs of If/Loop nodes.

    The transcription collapsed the parameters into one duplicated name;
    they are restored from the references in the body.
    """
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            # Protobuf repeated fields do not support item assignment;
            # emulate it with insert followed by pop.
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    # NOTE(review): `_graph_replace_input_with` is not defined under that name
    # in this transcription -- confirm the intended graph-level helper.
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def SCREAMING_SNAKE_CASE__ ( graph_proto , name , new_name ):
    """Apply the node-level input replacement to every node of `graph_proto`.

    Parameters were collapsed into one duplicated name by the transcription;
    they are restored here. NOTE(review): `_node_replace_input_with` is not
    defined under that name in this transcription -- confirm the intended
    node-level helper.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def SCREAMING_SNAKE_CASE__ ( model , model_without_ext , ind_to_replace ):
    """Remove duplicated initializers from `model_without_ext` and rewire all
    node inputs to the retained (reference) initializer.

    `ind_to_replace` is a list of (duplicate_index, reference_index) pairs
    with duplicate_index > reference_index. Parameters/locals collapsed by
    the transcription are restored from the asserts and the upstream script.
    """
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        # Both views must agree before we mutate anything.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def SCREAMING_SNAKE_CASE__ ( onnx_file_path ):
    """Deduplicate identical initializer tensors of an ONNX model and save the
    result as 'optimized_<original name>' next to the input file.

    Returns the path of the optimized model. The transcription collapsed all
    locals into the function's own name (leaving `model`, `inits`, ... unbound
    and joining the parameter with itself); they are reconstructed from the
    references in the body -- confirm against the original dedup script.
    """
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                # Element sizes per ONNX TensorProto data type:
                # 1 = FLOAT (4 bytes), 6 = INT32 (4), 7 = INT64 / 11 = DOUBLE (8).
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("""unexpected data type: """ , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                # Replace j (duplicate) with i (reference); j > i holds here.
                ind_to_replace.append((j, i) )
    print("""total reduced size: """ , total_reduced_size / 1_024 / 1_024 / 1_024 , """GB""" )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = """optimized_""" + model_file_name
    new_model_path = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path
return new_model | 6 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( _UpperCamelCase ):
    """CLIP-style image processor: resize (shortest edge), center-crop,
    rescale, normalize and optionally convert images to RGB.

    NOTE(review): the transcription collapsed the class attribute names into
    `_SCREAMING_SNAKE_CASE` and every instance attribute into `snake_case_`,
    so the attributes read in `preprocess` (`self.do_resize`, `self.size`,
    ...) are never actually set here -- confirm against the original
    CLIPImageProcessor before relying on this code.
    """
    _SCREAMING_SNAKE_CASE : Dict = ["pixel_values"]
    def __init__( self : Union[str, Any] , A__ : bool = True , A__ : Dict[str, int] = None , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = True , **A__ : Optional[int] , ) -> None:
        """Store the preprocessing configuration (defaults: 224 shortest edge,
        224x224 crop, 1/255 rescale, OpenAI CLIP mean/std)."""
        super().__init__(**A__ )
        snake_case_ : str = size if size is not None else {"shortest_edge": 2_24}
        snake_case_ : Union[str, Any] = get_size_dict(A__ , default_to_square=A__ )
        snake_case_ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ , param_name="crop_size" )
        snake_case_ : str = do_resize
        snake_case_ : str = size
        snake_case_ : Optional[Any] = resample
        snake_case_ : Any = do_center_crop
        snake_case_ : Any = crop_size
        snake_case_ : str = do_rescale
        snake_case_ : Optional[Any] = rescale_factor
        snake_case_ : int = do_normalize
        snake_case_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        snake_case_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
        snake_case_ : int = do_convert_rgb
    def UpperCAmelCase__ ( self : Optional[int] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`."""
        snake_case_ : str = get_size_dict(A__ , default_to_square=A__ )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        snake_case_ : str = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
        return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[Any] , ) -> np.ndarray:
        """Center-crop the image to `size["height"] x size["width"]`."""
        snake_case_ : Optional[int] = get_size_dict(A__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : Optional[Any] , A__ : np.ndarray , A__ : Union[int, float] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> str:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : Any , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Any , ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
    def UpperCAmelCase__ ( self : List[Any] , A__ : ImageInput , A__ : bool = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : int = None , A__ : bool = None , A__ : float = None , A__ : bool = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **A__ : Optional[Any] , ) -> PIL.Image.Image:
        """Run the configured pipeline (convert-RGB -> resize -> crop ->
        rescale -> normalize) over one or more images and return a
        BatchFeature with "pixel_values"."""
        snake_case_ : List[Any] = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Union[str, Any] = size if size is not None else self.size
        snake_case_ : Any = get_size_dict(A__ , param_name="size" , default_to_square=A__ )
        snake_case_ : Optional[int] = resample if resample is not None else self.resample
        snake_case_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : List[str] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" , default_to_square=A__ )
        snake_case_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : List[str] = image_std if image_std is not None else self.image_std
        snake_case_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        snake_case_ : List[Any] = make_list_of_images(A__ )
        if not valid_images(A__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            snake_case_ : Dict = [convert_to_rgb(A__ ) for image in images]
        # All transformations expect numpy arrays.
        snake_case_ : Dict = [to_numpy_array(A__ ) for image in images]
        if do_resize:
            snake_case_ : Dict = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
        if do_center_crop:
            snake_case_ : Tuple = [self.center_crop(image=A__ , size=A__ ) for image in images]
        if do_rescale:
            snake_case_ : str = [self.rescale(image=A__ , scale=A__ ) for image in images]
        if do_normalize:
            snake_case_ : int = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
        snake_case_ : List[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
        snake_case_ : Tuple = {"pixel_values": images}
        return BatchFeature(data=A__ , tensor_type=A__ )
| 666 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _snake_case ( scheduler , num_steps=10 ):
    """Step `scheduler` `num_steps` times and return the learning rate seen
    before each step.

    The transcription named both parameters after the function itself (a
    duplicated-name SyntaxError) and collapsed `lrs` into `_A`; the names
    are restored from the body.
    """
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def _snake_case ( scheduler , num_steps=10 ):
    """Like the collector above, but additionally round-trips the scheduler
    through torch.save/torch.load of its state_dict halfway through, to
    verify the schedule is serializable and resumes identically.

    Parameter and local names collapsed by the transcription are restored
    from the references in the body.
    """
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class lowercase_ ( unittest.TestCase ):
    """Optimizer convergence tests for AdamW and Adafactor.

    NOTE(review): the transcription collapsed locals into `_A` (so `w`,
    `target`, `criterion`, `optimizer`, `loss` are unbound) and gave all
    three methods the same name `lowerCAmelCase_`, so later definitions
    shadow earlier ones -- confirm against the original test file.
    """
    def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ):
        """Element-wise almost-equal assertion for two equally long lists."""
        self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
        for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
            self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase )
    def lowerCAmelCase_ ( self : Any ):
        """AdamW should drive w to the target under an MSE loss."""
        _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
        _A = torch.tensor([0.4, 0.2, -0.5] )
        _A = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        _A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            _A = criterion(_UpperCAmelCase , _UpperCAmelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def lowerCAmelCase_ ( self : int ):
        """Adafactor (fixed LR, no relative step) should also converge."""
        _A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
        _A = torch.tensor([0.4, 0.2, -0.5] )
        _A = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        _A = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_UpperCAmelCase , weight_decay=0.0 , relative_step=_UpperCAmelCase , scale_parameter=_UpperCAmelCase , warmup_init=_UpperCAmelCase , )
        for _ in range(1_000 ):
            _A = criterion(_UpperCAmelCase , _UpperCAmelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowercase_ ( unittest.TestCase ):
    """Learning-rate schedule tests: expected LR trajectories plus a
    save/reload round-trip for every scheduler factory.

    NOTE(review): the transcription collapsed locals into `_A` and class
    attributes into `UpperCAmelCase`, so `m`, `self.num_steps`,
    `self.optimizer`, `scheduler`, `scheds`, `expected_learning_rates`,
    `lrs_1`/`lrs_2` and the helpers `unwrap_schedule`/
    `unwrap_and_save_reload_schedule`/`LambdaScheduleWrapper` are unbound
    under these names -- confirm against the original test file.
    """
    UpperCAmelCase : List[str] = nn.Linear(50 , 50 ) if is_torch_available() else None
    UpperCAmelCase : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    UpperCAmelCase : Dict = 10
    def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=None ):
        """Element-wise almost-equal assertion with an optional message."""
        self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
        for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
            self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase , msg=_UpperCAmelCase )
    def lowerCAmelCase_ ( self : List[Any] ):
        """Check each scheduler's LR trajectory and its save/reload behavior."""
        _A = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        _A = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {'num_warmup_steps': 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, 'num_cycles': 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {'num_warmup_steps': 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            _A , _A = data
            _A = scheduler_func(self.optimizer , **_UpperCAmelCase )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            _A = unwrap_schedule(_UpperCAmelCase , self.num_steps )
            self.assertListAlmostEqual(
                _UpperCAmelCase , _UpperCAmelCase , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
            _A = scheduler_func(self.optimizer , **_UpperCAmelCase )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(_UpperCAmelCase ) # wrap to test picklability of the schedule
            _A = unwrap_and_save_reload_schedule(_UpperCAmelCase , self.num_steps )
            self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase , msg=F'''failed for {scheduler_func} in save and reload''' )
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ):
_A = fn
def __call__( self : Tuple , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[str] ):
return self.fn(*_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any ):
_A = list(map(self , scheduler.lr_lambdas ) )
| 7 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( position , n ):
    """Return the knight moves from `position` that stay on an n x n board,
    in the fixed move-order below.

    The transcription declared both parameters with the same name (a
    SyntaxError) and annotated a tuple unpacking (also a SyntaxError); the
    parameter names are restored from the type annotations and body usage.
    """
    y, x = position
    # The eight L-shaped knight moves, in a fixed order.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate )
    return permissible_positions
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: list[list[int]] ):
    """A board is complete when every square has been visited, i.e. no cell
    still holds the initial value 0."""
    return all(cell != 0 for row in lowerCAmelCase_ for cell in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int):
    """Recursive backtracking step for the open knight's tour.

    Tries every knight move from ``pos``: writes the next move number into the
    board, recurses, and undoes the write on failure. Returns True when the
    board is completely covered. (Names restored: the obfuscated source gave
    all three parameters one name and discarded the board mutations.)
    """
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Backtrack: undo the tentative move.
            board[y][x] = 0
    return False
def open_knight_tour(n: int):
    """Return an n x n board filled with an open knight's tour (moves 1..n*n).

    Every starting square is tried in turn.

    Raises:
        ValueError: when no open tour exists for a board of size ``n``.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            # This start square leads nowhere; clear it and try the next one.
            board[i][j] = 0
    # Fixed typo in the error message: "Kight" -> "Knight". The original also
    # read an undefined name in the f-string (its parameter was obfuscated).
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
    # Execute the module's doctests when run directly as a script.
    import doctest

    doctest.testmod()
| 666 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger; the tokenizer class below calls ``logger.error``/``logger.warning``.
logger = logging.get_logger(__name__)

# On-disk file names that make up a saved CTRL vocabulary.
# (Constant names restored from their reads inside the tokenizer class below;
# the obfuscated source assigned all of these to one name, so each assignment
# clobbered the previous one.)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# Download URLs for the published ``ctrl`` checkpoint's vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

# Maximum model input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL "control codes": prompt-domain name -> vocabulary token id.
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def get_pairs(word):
    """Return the set of adjacent symbol bigrams in ``word``.

    ``word`` is a sequence of symbols (variable-length strings); BPE uses the
    returned pairs to choose the next merge. (Name restored from the call
    sites inside the tokenizer class below; the obfuscated source returned an
    undefined name.)
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE (a__ ):
    """Tokenizer for Salesforce CTRL, using byte-pair encoding (BPE).

    Splits text on whitespace with a regex, applies BPE merges loaded from
    ``merges_file``, and marks intra-word sub-tokens with a trailing ``@@``.

    NOTE(review): method and attribute names were restored from in-file reads
    (``self.bpe``, ``self.encoder``, ``save_directory``, ...) and the
    PreTrainedTokenizer conventions of the base class; the obfuscated source
    gave every method one colliding name and every ``__init__`` parameter the
    same name (a SyntaxError).
    """

    # These attribute names are read by the PreTrainedTokenizer base class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # Reverse map for decoding ids back to tokens.
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a version header, last line is empty.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Lower rank == earlier (higher-priority) merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full vocabulary (base + added tokens) as a token -> id dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to one whitespace token.

        Returns the sub-tokens joined by ``'@@ '`` (the trailing ``</w>``
        end-of-word marker is stripped). Results are memoized in
        ``self.cache``.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the final character with the end-of-word symbol.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the highest-priority (lowest-rank) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Drop the trailing '</w>' marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` on whitespace and BPE-encode each piece."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocabulary id (falls back to the unk token's id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token string (falls back to the unk token)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens and undo the '@@ ' intra-word markers."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into ``save_directory``.

        Returns the (vocab_file, merge_file) paths, or None when
        ``save_directory`` is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
class snake_case__ ( _UpperCamelCase ):
    """Configuration for BertGeneration models (BERT adapted for seq2seq generation).

    Defaults follow the reference ``bert-generation`` architecture. The
    obfuscated source gave every ``__init__`` parameter the same name (a
    SyntaxError) and never stored the hyperparameters on ``self``; both are
    restored here per the reference implementation.
    """

    # Architecture key read by the auto-config machinery (restored name —
    # the obfuscated source stored it under a throwaway attribute).
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are handled by the base PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 666 | 0 |
def solution(n=4_000_000) -> int:
    """Project Euler 2: sum of the even Fibonacci numbers not exceeding ``n``.

    (Names restored: the body read ``n`` and the __main__ demo called
    ``solution`` while the def carried obfuscated names, so every call raised
    NameError.)
    """
    fib = [0, 1]
    i = 0
    # Generate Fibonacci numbers; stop once the newest value exceeds n.
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    # The last generated value may exceed n, so it is excluded from the sum.
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f'{solution() = }')
import math
def sieve(n: int):
    """Segmented sieve of Eratosthenes: return all primes <= n in order.

    A base sieve over [2, sqrt(n)] produces the seed primes; each subsequent
    window of width ~sqrt(n) is then sieved with those seeds. (Names restored:
    the obfuscated source wrote every value to a throwaway local while reading
    ``start``/``end``/``temp``/..., so every loop raised NameError. The def
    name matches the original module-level call ``sieve(10**6)``.)
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Base sieve: collect the seed primes up to sqrt(n).
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    # Sieve each window [low, high] using only the seed primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of ``each`` at or above ``low``.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


if __name__ == "__main__":
    # Demo guarded behind __main__ so importing this module stays side-effect
    # free; the original ran an undefined name at import time.
    print(sieve(10**6))
| 666 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """Builds tiny ConvNext configs/inputs and runs per-model-class checks.

    Class and method names restored from their call sites in the unittest
    class below (``ConvNextModelTester(self)``, ``prepare_config_and_inputs``,
    ...); the obfuscated source gave every ``__init__`` parameter one name
    (a SyntaxError) and discarded every ``self.*`` assignment.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE(review): mutable list defaults are kept to match the reference suite.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels, when enabled) plus a tiny config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
    """Model-level test suite for ConvNext (model, classification head, backbone).

    NOTE(review): this chunk carries heavy obfuscation damage, left as-is here:
    - every class attribute below is assigned to the single name
      ``UpperCAmelCase``, so only the last assignment survives at runtime;
    - every test method shares the single name ``UpperCamelCase_``, so only
      the last definition survives and unittest discovery finds none;
    - assignments write to the throwaway name ``_UpperCamelCase`` while later
      lines read the original locals (``model``, ``arg_names``, ...), and
      several call arguments were collapsed to ``_A``.
    Restore the distinct names from the reference test suite before relying
    on this class.
    """

    UpperCAmelCase = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    UpperCAmelCase = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCAmelCase = True
    UpperCAmelCase = False
    UpperCAmelCase = False
    UpperCAmelCase = False
    UpperCAmelCase = False

    def UpperCamelCase_ ( self : Union[str, Any] ):
        # NOTE(review): both results are discarded; reads below expect
        # ``self.model_tester`` and ``self.config_tester``.
        _UpperCamelCase = ConvNextModelTester(self )
        _UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def UpperCamelCase_ ( self : str ):
        # Runs the shared config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def UpperCamelCase_ ( self : str ):
        # Intentional no-op hook used by the config test above.
        return

    @unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        pass

    @unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
    def UpperCamelCase_ ( self : Optional[Any] ):
        pass

    @unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
    def UpperCamelCase_ ( self : Optional[int] ):
        pass

    def UpperCamelCase_ ( self : List[str] ):
        # Checks that every model's forward signature starts with pixel_values.
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = model_class(_A )
            _UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCamelCase = [*signature.parameters.keys()]
            _UpperCamelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def UpperCamelCase_ ( self : Optional[Any] ):
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def UpperCamelCase_ ( self : int ):
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_A )

    def UpperCamelCase_ ( self : List[str] ):
        # NOTE(review): the inner helper's three parameters were obfuscated to
        # one name (``_A``), which is a SyntaxError in Python.
        def check_hidden_states_output(_A : int , _A : Optional[int] , _A : Dict ):
            _UpperCamelCase = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                _UpperCamelCase = model(**self._prepare_for_class(_A , _A ) )
            _UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(_A ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCamelCase = True
            check_hidden_states_output(_A , _A , _A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCamelCase = True
            check_hidden_states_output(_A , _A , _A )

    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    @slow
    def UpperCamelCase_ ( self : List[str] ):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = ConvNextModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
def prepare_img():
    """Load the COCO cats fixture image used by the slow integration test.

    (Name restored from its call site in the integration test class below;
    the obfuscated source returned an undefined ``image``.)
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test: runs the published facebook/convnext-tiny-224
    checkpoint on a COCO fixture image and pins the first logits.

    NOTE(review): obfuscation damage left as-is — assignments write the
    throwaway name ``_UpperCamelCase`` while later lines read ``model``/
    ``image_processor``/``inputs``/``outputs``, the cached property below lost
    its original name (read as ``self.default_image_processor``), and several
    call arguments were collapsed to ``_A``.
    """

    @cached_property
    def UpperCamelCase_ ( self : int ):
        # Image processor for the published checkpoint (None without vision deps).
        return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None

    @slow
    def UpperCamelCase_ ( self : List[str] ):
        _UpperCamelCase = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(_A )
        _UpperCamelCase = self.default_image_processor
        _UpperCamelCase = prepare_img()
        _UpperCamelCase = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
        # forward pass
        with torch.no_grad():
            _UpperCamelCase = model(**_A )
        # verify the logits
        _UpperCamelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _A )
        _UpperCamelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase, __lowercase ):
    """Runs the shared backbone-test mixin suite against ConvNextBackbone.

    Distinct class-attribute names restored: the obfuscated source assigned
    all three values to a single name, so only the last assignment survived.
    NOTE(review): attribute and method names follow the backbone-test mixin
    convention — confirm against the reference test suite.
    """

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        # The shared tester provides config construction for the mixin's checks;
        # the obfuscated source discarded it into a throwaway local.
        self.model_tester = ConvNextModelTester(self)
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
    """Holds image-processor hyperparameters for the tests below and computes
    the output sizes the processor is expected to produce.

    Class and method names restored from their call sites in the test class
    below; the obfuscated source gave every ``__init__`` parameter one name
    (a SyntaxError) and discarded every ``self.*`` assignment.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # DETR-style default: resize shortest edge to 18 (test-sized), cap the
        # longest edge at 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        For a batch, returns the per-dimension maxima (images are padded up to
        the largest height/width in the batch).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                # PIL reports (width, height).
                w, h = image.size
            else:
                # Array input is channels-first: (C, H, W).
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """Unit and slow integration tests for ConditionalDetrImageProcessor.

    NOTE(review): obfuscation damage left as-is throughout this class —
    assignments write the throwaway name ``snake_case_`` while later lines
    read the original locals (``image_processing``, ``encoding``,
    ``expected_height``, ...), every test method shares one name so only the
    last definition survives, and many call arguments were collapsed to
    ``A__``. A handful of illegal annotations on tuple-unpack targets
    (``a ,b : T = ...`` is a SyntaxError) were removed; no other token was
    changed.
    """

    _SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : Tuple ) -> Dict:
        '''Create the shared hyperparameter tester (reads expect ``self.image_processor_tester``).'''
        snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )

    @property
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        '''Kwargs for instantiating the processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : Any ) -> Tuple:
        '''The processor exposes the expected configuration attributes.'''
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , "image_mean" ) )
        self.assertTrue(hasattr(A__ , "image_std" ) )
        self.assertTrue(hasattr(A__ , "do_normalize" ) )
        self.assertTrue(hasattr(A__ , "do_resize" ) )
        self.assertTrue(hasattr(A__ , "size" ) )

    def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
        '''from_dict honors default size and explicit size/pad overrides.'''
        snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , A__ )
        snake_case_ : Optional[int] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , A__ )

    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        '''No-op placeholder kept from the reference suite.'''
        pass

    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        '''Round-trip random PIL images through the processor, unbatched and batched.'''
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : int ) -> Any:
        '''Round-trip random numpy arrays through the processor, unbatched and batched.'''
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : Tuple ) -> str:
        '''Round-trip random torch tensors through the processor, unbatched and batched.'''
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        '''Integration: encode a COCO detection sample and pin the produced tensors.'''
        snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case_ : Optional[Any] = json.loads(f.read() )
        snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
        # encode them
        snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : Any = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify orig_size
        snake_case_ : Any = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )

    @slow
    def UpperCAmelCase__ ( self : int ) -> str:
        '''Integration: encode a COCO panoptic sample and pin the produced tensors.'''
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Any = json.loads(f.read() )
        snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify masks
        snake_case_ : Union[str, Any] = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
        # verify orig_size
        snake_case_ : Dict = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger; the DetrConfig class below calls ``logger.info``.
# (Name restored — the obfuscated source bound it to a throwaway name that
# was immediately clobbered by the dict below.)
logger = logging.get_logger(__name__)

# Map from published DETR checkpoint name to its hosted config URL.
# NOTE(review): the original constant name is not recoverable from this view;
# the obfuscated name is kept since nothing in view reads it.
lowercase_ = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """
    Configuration class for a DETR model.

    Stores the backbone choice (timm or a HF backbone config), transformer
    encoder/decoder sizes, and the Hungarian-matcher / loss coefficients.
    """

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map common attribute names onto DETR's own parameter names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> int:
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rebuild the backbone config object from its serialized dict form.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias: DETR's attention-head count is the encoder's."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias: DETR's hidden size is `d_model`."""
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Alternate constructor from a pre-built backbone configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for DETR."""

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph's inputs."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset supporting all DETR operators."""
        return 12
| 11 | import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module logger; used by SquadDataset below.
logger = logging.get_logger(__name__)

# Config classes that map to a question-answering model, and their model-type strings.
# (Previously all three values were assigned to one name, so each overwrote the last
# and the references to `logger`/`MODEL_CONFIG_CLASSES` below were unresolved.)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    (Each field was previously declared under the same name, so the fields overwrote
    one another; the names below match what `SquadDataset` reads from `args`.)
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=1_2_8,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=1_2_8,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=6_4,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=3_0,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=2_0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """Dataset split names accepted by `SquadDataset` (looked up via `Split[mode]`)."""

    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    PyTorch dataset wrapping SQuAD features, with on-disk caching of the
    (features, dataset, examples) triple guarded by a file lock.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # NOTE(review): both branches construct the same processor class; upstream SQuAD
        # code picks a v2 vs v1 processor here — confirm against the import block.
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        """Convert feature `i` to tensors, shaping the dict per model type and split."""
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        # These model families do not use token type ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        # Gold answer spans are only available (and needed) for training.
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 666 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Markov chain stored as an adjacency map:
    connections[node][dest] = transition probability.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register `node` with an empty transition map."""
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        """Set the probability of moving from `node1` to `node2`, creating nodes as needed."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        """Return all known node names."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """
        Sample the next node from `node`'s transition distribution.
        Returns "" if the cumulative probabilities never exceed the random draw.
        """
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
return ""
def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    """
    Run a random walk of `steps` transitions starting at `start` and
    return a Counter of how many times each node was visited.
    Every known node starts with a count of 1 (from Counter over the node list).
    """
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
return visited
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 12 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the config classes below log through this name.
logger = logging.get_logger(__name__)

# Canonical GIT checkpoints and their hosted configuration files.
# (Previously both values were assigned to the same name, clobbering the logger.)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=7_68,
        intermediate_size=30_72,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=2_24,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a full GIT config when needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for a GIT (GenerativeImage2Text) model: vision encoder + text decoder."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=10_24,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=1_01,
        eos_token_id=1_02,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 666 | 0 |
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """
    Return the multiplication table of `number` up to `number_of_terms` terms,
    one "number * i = product" equation per line.
    """
    return "\n".join(
        F'{number} * {i} = {number * i}' for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
    # Demo: print the 5-times table up to 5 x 10.
    print(multiplication_table(number=5, number_of_terms=10))
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings in [0, 1]:
    the Jaro similarity boosted by up to 4 characters of common prefix.

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        """Characters of `_stra` matched in `_strb` within the Jaro window."""
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # Blank out the matched char so it cannot be matched twice.
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1, matching_2) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str1[:4], str2[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    # Run doctests, then print an example similarity score.
    import doctest
    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds tiny UperNet configs and random inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_2,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[1_0, 2_0, 3_0, 4_0],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=3_7,
        hidden_act="gelu",
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        """Tiny ConvNext backbone config used by the UperNet head."""
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        """Full UperNet config wrapping the tiny backbone."""
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=5_1_2,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=4_0,
            auxiliary_channels=2_5_6,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=2_5_5,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        """Forward a model and check the logits shape."""
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common test mixins."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as UperNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # common config properties do not apply to UperNet
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=F"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download the ADE20k fixture image from the Hub and return it as an RGB PIL image."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against published UperNet checkpoints."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 14 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Fixed seed so the sanity-check forward passes below are reproducible.
set_seed(7_7_0)

# Mapping of suno/bark checkpoint key fragments to their names in the HF implementation.
# (Previously every constant in this block was assigned to the same name, so each
# assignment clobbered the last and the references below were unresolved.)
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Remote checkpoint locations, keyed by model type ("_small" suffix = small variants).
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# Local directory where raw suno/bark checkpoints are cached.
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the checkpoint for `model_type` (small variant if requested)."""
    key = model_type
    if use_small:
        key += "_small"
    # NOTE(review): joining under CACHE_DIR matches how _load_model reports the download
    # destination — the garbled original joined the model type itself.
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download `file_name` from the Hub repo `from_hf_path` into the local cache dir."""
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """
    Load a suno/bark checkpoint from `ckpt_path` (downloading it if absent) and
    return the corresponding HF Bark sub-model with converted weights, in eval
    mode on `device`.

    Raises NotImplementedError for unknown `model_type` and ValueError when the
    checkpoint's keys do not line up with the HF model's state dict.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    # .attn.bias buffers are intentionally not part of the HF state dict.
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model checkpoint to HF format and verify it.

    Loads the original suno/bark checkpoint and the converted HF model, checks
    that parameter counts match and that outputs agree within tolerance, then
    saves the HF model to `pytorch_dump_folder_path`.

    Args:
        pytorch_dump_folder_path: output directory for the converted model.
        use_small: convert the small checkpoint variant instead of the large one.
        model_type: one of "text", "coarse" or "fine".

    Raises:
        NotImplementedError: for an unknown `model_type`.
        ValueError: when parameter counts, output shapes or output values diverge.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble a full BarkModel from already-converted sub-models and save it.

    Args:
        semantic_path: directory of the converted semantic (text) sub-model.
        coarse_path: directory of the converted coarse-acoustics sub-model.
        fine_path: directory of the converted fine-acoustics sub-model.
        append_text: subfolder name appended to `folder_path` for the dump.
        hub_path: hub repo id the assembled model is pushed to.
        folder_path: local parent directory for the dump.
    """
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    # Plug the sub-models and their shared generation config into the composite model.
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # CLI entry point: convert a single Bark sub-model checkpoint.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 0 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    """Tests for the EulerDiscreteScheduler sampling loop."""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, optionally overridden by kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run a full deterministic sampling loop and check output statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same loop as above but with the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the test device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure: maps submodule name -> public names it exposes.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code requires torch; skip it when torch is missing.
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1].

    The merged result is written back into input_list in place, and the
    (mutated) list is returned.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        # Pop the smaller head; ties take the left run first (stable merge).
        result.append((left if left[0] <= right[0] else right).pop(0))
    # One run may still hold leftovers; append them and write back in place.
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort.

    Returns a sorted copy; the caller's list is not mutated.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))

from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that resizes, center-crops, rescales and
    normalizes images, and can post-process semantic segmentation logits.

    Args:
        do_resize: Whether to resize the shortest edge to `size["shortest_edge"]`.
        size: Target size dict; defaults to `{"shortest_edge": 256}`.
        resample: Resampling filter used when resizing.
        do_center_crop: Whether to center-crop to `crop_size`.
        crop_size: Crop size dict; defaults to `{"height": 224, "width": 224}`.
        do_rescale: Whether to multiply pixel values by `rescale_factor`.
        rescale_factor: Scale factor applied when rescaling (default 1/255).
        do_normalize: Whether to normalize with `image_mean` / `image_std`.
        image_mean: Per-channel mean; defaults to `IMAGENET_STANDARD_MEAN`.
        image_std: Per-channel std; defaults to `IMAGENET_STANDARD_STD`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values of `image` by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transformations to one image or a batch.

        Per-call arguments override the defaults stored on the processor.
        Returns a `BatchFeature` with a "pixel_values" entry.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image semantic segmentation maps.

        When `target_sizes` is given, logits are bilinearly resized to each
        target size before the per-pixel argmax.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 666 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an official XLM checkpoint into HF format.

    Writes the re-keyed state dict, the JSON config and the vocab file into
    `pytorch_dump_folder_path`.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop non-serializable entries (tensors / arrays) from the raw params.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE continuation markers ("@@") are stripped; other tokens get the "</w>"
    # end-of-word suffix, except the first special tokens (index <= 13).
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    # CLI entry point: convert an official XLM dump to HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy import structure: maps submodule name -> public names it exposes.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    """A SplitDict must round-trip through its YAML-list representation."""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    """asdict must keep the (deprecated) dataset_name field of each split."""
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
from ...configuration_utils import PretrainedConfig

TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    """Configuration for TAPAS models.

    Holds the BERT-style backbone hyperparameters (with enlarged
    max_position_embeddings and per-feature type_vocab_sizes) plus the
    table-QA fine-tuning hyperparameters (cell selection, aggregation, etc.).
    """

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        # JSON round-trips turn int keys into strings; normalize them back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
"""simple docstring"""
from timeit import timeit
_a = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowerCamelCase__ ( __snake_case ) -> bool:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = len(__snake_case ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def lowerCamelCase__ ( __snake_case ) -> bool:
"""simple docstring"""
_UpperCamelCase = len(__snake_case ) // 2
_UpperCamelCase = len(__snake_case )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(__snake_case ) )
def lowerCamelCase__ ( __snake_case ) -> bool:
"""simple docstring"""
if len(__snake_case ) <= 2:
return True
if s[0] == s[len(__snake_case ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def lowerCamelCase__ ( __snake_case ) -> bool:
"""simple docstring"""
return s == s[::-1]
def lowerCamelCase__ ( __snake_case ) -> None:
"""simple docstring"""
_UpperCamelCase = F'''all({name}(key) is value for key, value in test_data.items())'''
_UpperCamelCase = F'''from __main__ import test_data, {name}'''
_UpperCamelCase = 50_00_00
_UpperCamelCase = timeit(stmt=__snake_case, setup=__snake_case, number=__snake_case )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
    # Sanity-check that all implementations agree on the reference data,
    # then benchmark each one (fastest to slowest on a sample machine;
    # the "# finished ..." comments record sample timings for the call below).
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(F"""{key:21} {value}""")
    print("""a man a plan a canal panama""")
    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("""is_palindrome_slice""")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("""is_palindrome""")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("""is_palindrome_recursive""")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("""is_palindrome_traversal""")
| 19 | import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal beam-based builder producing flat string examples."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No supervised keys for this dummy dataset.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        # Single TRAIN split fed from the in-memory dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class snake_case__ ( datasets.BeamBasedBuilder ):
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=A__ , )
def UpperCAmelCase__ ( self : Any , A__ : List[str] , A__ : str ) -> Optional[int]:
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def UpperCAmelCase__ ( self : List[Any] , A__ : List[str] , A__ : Optional[int] ) -> List[str]:
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
def SCREAMING_SNAKE_CASE_ ( ):
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def SCREAMING_SNAKE_CASE_ ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class snake_case__ ( _UpperCamelCase ):
@require_beam
def UpperCAmelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : Dict = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
snake_case_ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , A__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
import apache_beam as beam
snake_case_ : Tuple = beam.io.parquetio.WriteToParquet
snake_case_ : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : List[Any] = DummyBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
snake_case_ : int = partial(A__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train-00000-of-00002.arrow" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
snake_case_ : Optional[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , A__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : Tuple = DummyBeamDataset(cache_dir=A__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
snake_case_ : List[str] = NestedBeamDataset(cache_dir=A__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , "default" , "0.0.0" , f"{builder.name}-train.arrow" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
snake_case_ : int = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , A__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , A__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
import datasets
_lowerCAmelCase: List[str] = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_lowerCAmelCase: List[Any] = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_lowerCAmelCase: Union[str, Any] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def _lowercase( __a : Optional[int] , __a : List[Any] ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase_(datasets.Metric):
    """XNLI metric: plain accuracy of predictions against reference labels.

    NOTE(review): the mangled original named both hook methods
    ``__UpperCamelCase`` (the second shadowed the first) and duplicated the
    ``_compute`` parameter names; restored to the ``datasets.Metric``
    ``_info``/``_compute`` contract.
    """

    def _info(self):
        # Declare metric metadata and expected input feature types.
        # "sts-b" is a regression config and takes float labels.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32'),
                    'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32'),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        # `_lowercase` is the module-level accuracy helper defined above.
        return {"accuracy": _lowercase(predictions, references)}
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning de-duplication set so warnings fire again."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    """Stub out the huggingface_hub client used by `datasets.inspect`."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Every metric entry point must emit the 'moved to evaluate' deprecation warning."""
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger. In the mangled original both statements were bound to
# the same name (`UpperCAmelCase_`), so the archive map clobbered the logger.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration for SegFormer models (encoder depths, widths, attention
    heads per stage, and the all-MLP decoder size).

    NOTE(review): the mangled original duplicated every ``__init__`` parameter
    name (a SyntaxError) while the body assigned from the real names; the
    signature is restored from the body and the defaults' order.
    """

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Deprecated flag; kept for backward compatibility with old configs.
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer (image input only)."""

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single 4D image tensor with symbolic batch/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance when comparing ONNX outputs to the torch reference.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
from __future__ import annotations
import bisect
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the leftmost insertion index for *item* in *sorted_collection*.

    A negative *hi* means "search up to the end of the list". The original
    declared all four parameters with one mangled name (a SyntaxError);
    the names are restored from the body.
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Return the rightmost insertion index for *item* in *sorted_collection*.

    A negative *hi* means "search up to the end of the list". Parameter names
    restored from the body (the original duplicated one mangled name).
    """
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # `<=` (rather than `<`) places equal items to the right.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* into *sorted_collection* at the leftmost sorted position.

    Delegates to the stdlib ``bisect`` module (imported at file top); the
    original called the undefined name ``bisect_left``. A negative *hi*
    means "up to the end of the list", matching the custom bisect above.
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    """Insert *item* into *sorted_collection* at the rightmost sorted position.

    Delegates to the stdlib ``bisect`` module (imported at file top); the
    original called the undefined name ``bisect_right``. A negative *hi*
    means "up to the end of the list", matching the custom bisect above.
    """
    if hi < 0:
        hi = len(sorted_collection)
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi), item)
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search: index of *item* in *sorted_collection*, or None.

    Parameter names restored from the body (the original duplicated one
    mangled name, a SyntaxError).
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int) -> int | None:
    """Binary search via the stdlib ``bisect``: index of *item*, or None.

    Parameter names restored from the body (the original duplicated one
    mangled name, a SyntaxError).
    """
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def SCREAMING_SNAKE_CASE_(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over [left, right]; index of *item*, or None.

    Parameter names restored from the body; the recursion is made
    self-referential (the original called the undefined name
    ``binary_search_by_recursion``).
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    if sorted_collection[midpoint] > item:
        return SCREAMING_SNAKE_CASE_(sorted_collection, item, left, midpoint - 1)
    return SCREAMING_SNAKE_CASE_(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    # Interactive driver: read a sorted list and a target from stdin.
    # Local names restored (the original bound everything to `UpperCAmelCase`
    # and then referenced the real names, raising NameError).
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    # NOTE(review): `binary_search` is not defined under that name in this
    # chunk (the defs above were renamed to SCREAMING_SNAKE_CASE_); confirm
    # the intended alias.
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def snake_case_():
    '''Top-level entry point for the `accelerate` command-line interface.

    Locals restored from the surviving usage sites (`parser`, `subparsers`,
    `args`); the mangled original referenced the undefined name
    ``UpperCamelCase`` throughout.
    '''
    parser = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''accelerate command helpers''')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    snake_case_()
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case__(_UpperCamelCase):
    """Unconditional latent-diffusion image pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): the mangled original declared every ``__call__`` parameter
    with the same name (a SyntaxError) and referenced undefined locals
    (`latents`, `eta`); the signature and locals are restored from the
    surviving usage sites and default values.
    """

    def __init__(self, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Sample `batch_size` images; returns PIL images unless told otherwise."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import math
from datetime import datetime, timedelta
def _snake_case (__lowercase):
UpperCamelCase_ = year % 19
UpperCamelCase_ = year % 4
UpperCamelCase_ = year % 7
UpperCamelCase_ = math.floor(year / 100)
UpperCamelCase_ = math.floor((13 + 8 * leap_day_inhibits) / 25)
UpperCamelCase_ = leap_day_inhibits / 4
UpperCamelCase_ = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
UpperCamelCase_ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
UpperCamelCase_ = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
UpperCamelCase_ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__lowercase , 4 , 19)
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__lowercase , 4 , 18)
else:
return datetime(__lowercase , 3 , 22) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
snake_case__ : Dict = """will be""" if year > datetime.now().year else """was"""
print(f'Easter in {year} {tense} {gauss_easter(year)}')
from decimal import Decimal, getcontext
from math import ceil, factorial
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
snake_case_ : List[str] = precision
snake_case_ : Union[str, Any] = ceil(precision / 1_4 )
snake_case_ : List[str] = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
snake_case_ : str = 1
snake_case_ : List[str] = 1_3_5_9_1_4_0_9
snake_case_ : str = Decimal(lowerCAmelCase_ )
for k in range(1 , lowerCAmelCase_ ):
snake_case_ : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowerCAmelCase_ ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 5_0
    # NOTE(review): `pi` is expected to alias the Chudnovsky function above
    # (renamed to SCREAMING_SNAKE_CASE_ in this chunk) — confirm the alias.
    print(f"The first {n} digits of pi is: {pi(n)}")
'''simple docstring'''
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float(moles / volume ) * nfactor )
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def _UpperCamelCase (_lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float )-> float:
'''simple docstring'''
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
def SCREAMING_SNAKE_CASE_(lowerCAmelCase_: int = 1_0_0_0):
    """Return the index of the first Fibonacci term with *lowerCAmelCase_* digits
    (Project Euler 25 convention: F(1) = F(2) = 1).

    Locals restored from the surviving usage sites; the mangled original
    referenced `fa`, `f`, `index`, `i` and `n` without binding them.
    """
    n = lowerCAmelCase_
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        # Count the digits of the new term.
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this chunk —
    # the def above was renamed to SCREAMING_SNAKE_CASE_; confirm the alias.
    print(solution(int(str(input()).strip())))
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase__(_a):
    """Add dense atom14 <-> atom37 index maps and existence masks to *_a*.

    *_a* is a protein feature dict with at least an ``"aatype"`` tensor.
    NOTE(review): the mangled original bound every intermediate to a
    placeholder and used nonexistent dtypes (torch.intaa/floataa); locals and
    int32/float32 restored from the surviving usage sites — the output dict
    keys follow the openfold convention and should be confirmed against
    downstream consumers.
    """
    protein = _a
    restype_14_to_37 = []  # per restype: atom14 slot -> atom37 index
    restype_37_to_14 = []  # per restype: atom37 slot -> atom14 index
    restype_14_mask = []   # per restype: which of the 14 slots exist

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_14_to_37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx = {name: i for i, name in enumerate(atom_names)}
        restype_37_to_14.append(
            [(atom_name_to_idx[name] if name in atom_name_to_idx else 0) for name in rc.atom_types])
        restype_14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_14_to_37.append([0] * 14)
    restype_37_to_14.append([0] * 37)
    restype_14_mask.append([0.0] * 14)

    device = protein["aatype"].device
    restype_14_to_37 = torch.tensor(restype_14_to_37, dtype=torch.int32, device=device)
    restype_37_to_14 = torch.tensor(restype_37_to_14, dtype=torch.int32, device=device)
    restype_14_mask = torch.tensor(restype_14_mask, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_14_to_37 = restype_14_to_37[protein_aatype]
    residx_14_mask = restype_14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_14_mask
    protein["residx_atom14_to_atom37"] = residx_14_to_37.long()

    # create the gather indices for mapping back
    residx_37_to_14 = restype_37_to_14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_37_to_14.long()

    # create the corresponding per-residue atom37 existence mask
    restype_37_mask = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_37_mask[restype, atom_type] = 1

    residx_37_mask = restype_37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_37_mask

    return protein
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[Any] = tree_map(lambda _a: torch.tensor(_a , device=batch["aatype"].device) , _a , np.ndarray)
SCREAMING_SNAKE_CASE : int = tensor_tree_map(lambda _a: np.array(_a) , make_atomaa_masks(_a))
return out | 25 | from __future__ import annotations
def SCREAMING_SNAKE_CASE_(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right+1].

    Raises ValueError on an empty list and IndexError on out-of-range bounds.
    Parameter names restored from the body (the original duplicated one
    mangled name, a SyntaxError) and the recursion made self-referential.
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = SCREAMING_SNAKE_CASE_(nums, left, mid)  # max in range[left, mid]
    right_max = SCREAMING_SNAKE_CASE_(nums, mid + 1, right)  # max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
    # Run the module's embedded doctests verbosely when executed directly.
    import doctest
    doctest.testmod(verbose=True)
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# Test fixtures. The mangled original bound all three constants to one name
# (`__UpperCamelCase`), leaving URL/CONTENT/HASH (used below) undefined.
URL = "http://www.mocksite.com/file1.txt"
CONTENT = "\"text\": [\"foo\", \"foo\"]"
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Minimal stand-in for a `requests` response object."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Always serve the fixture CONTENT (the original passed the kwargs
        # dict to bytes(), which is a TypeError).
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    """Replacement for `requests.request` returning the canned response."""
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """Download via every supported URL container shape and check the cache layout."""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize every container shape to parallel lists before checking.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extract via every supported path container shape and check the layout."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    """Shared check: *file* yields exactly 4 jsonl records with the fixed columns."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

# The tokenizer class below reads these module-level constants (and `logger`)
# at class-creation time; they must live under their canonical names.  Fix:
# all three dicts were assigned to the same throwaway name `UpperCAmelCase`,
# leaving VOCAB_FILES_NAMES & co. undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class snake_case__(_UpperCamelCase):
    """Fast RoBERTa tokenizer backed by the `tokenizers` library.

    Restored from a garbled state: `__init__` had every parameter named
    ``A__`` (a SyntaxError), all methods shared one name, locals were assigned
    to ``snake_case_`` but read under their real names, and
    ``@mask_token.setter`` referenced a property that was never defined.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # does not match what the caller asked for.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) when it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words: they include the space before
        # them, hence lstrip=True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa does not use token type ids: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
| 666 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule -> public names, consumed by _LazyModule below.
# Fix: this dict (and the torch-only additions) were assigned to a throwaway
# name `__A`, so `_import_structure` at the bottom was undefined and the lazy
# module was never installed into sys.modules.
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is installed.
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 | from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__(unittest.TestCase):
    """Slow integration test for the TF XLM-RoBERTa base checkpoint."""

    @slow
    # Renamed from a garbled identifier so unittest/pytest actually collect it.
    def test_output_embeds_base_model(self) -> None:
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            # token ids for "My dog is cute"
            # Fix: dtype was the nonexistent `tf.intaa` (garbled tf.int32).
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,  # was the nonexistent `tf.floataa`
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 666 | 0 |
"""Lazy import structure for XLNet: configuration, tokenizers, PyTorch and TF models."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Mapping of submodule -> public names, consumed by _LazyModule below.
# Fix: every optional section reassigned the same throwaway name
# `UpperCamelCase_` instead of adding a key, so `_import_structure` at the
# bottom was undefined and all but the last list were discarded.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__(_UpperCamelCase):
    r"""
    CLIP-style image processor.

    Pipeline (each step individually switchable): convert to RGB -> resize the
    shortest edge -> center-crop -> rescale pixel values -> normalize with the
    OpenAI CLIP mean/std.

    Restored from a garbled state: every method had all its parameters named
    ``A__`` (a SyntaxError), all methods shared one name while ``preprocess``
    calls them as ``self.resize``/``self.center_crop``/``self.rescale``/
    ``self.normalize``, and attributes were assigned to ``snake_case_``
    instead of ``self``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: resize shortest edge to 224, then center-crop to 224x224.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline on one image or a batch; every argument defaults to the processor's setting."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 666 | 0 |
"""simple docstring"""
def prime_sieve_eratosthenes(num):
    """Return all primes up to and including *num* via the sieve of Eratosthenes.

    Raises ValueError for non-positive input.

    Fixes: the inner range stepped by *num* instead of *p*, so composites were
    left unmarked and the result was wrong; the ``__main__`` block called
    ``prime_sieve_eratosthenes``/``user_num`` which were never defined.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Strike out every multiple of p starting at p*p.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


# Backward-compatible alias for the old (garbled) public name.
lowercase = prime_sieve_eratosthenes


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 29 | from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from *position* that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """True when every square of *board* has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking step: try to extend the tour from *pos* with move number *curr*."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move (backtrack)
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board; raise ValueError if none exists.

    The returned board holds the move number (1..n*n) of each square.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 666 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of trainable (requires_grad) parameters in *model*.

    Fix: the function is invoked as ``count_trainable_parameters`` by the
    logging callback below but was defined under a garbled name, and its
    locals were assigned to ``UpperCAmelCase_`` while read as
    ``model_parameters``/``params``.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
# Module logger — the callback class below logs through this name.
logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint tracking ``val_{metric}`` (higher is better, keep top 3).

    Supported metrics: rouge2, bleu, em.  Fix: the def had two parameters with
    the same name (SyntaxError) and the filename/callback locals were garbled.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on ``val_{metric}``.

    "min" mode for loss-like metrics, "max" otherwise.  Fix: the def had two
    parameters with the same name (SyntaxError) and ``verbose`` was garbled.
    """
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class __a(pl.Callback):
    """Lightning callback that logs learning rates and writes per-split result files.

    Restored from a garbled state: every hook had duplicate parameter names
    (SyntaxError) and all methods shared the name ``a__`` — Lightning only
    invokes the canonical hook names restored here, and ``on_test_end`` calls
    ``self._write_logs``.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        """Dump callback metrics (and optionally generations) for *type_path* to the output dir."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
class snake_case__(_UpperCamelCase):
    """Configuration class for BertGeneration models.

    Restored from a garbled state: ``__init__`` had every parameter named
    ``A__`` (a SyntaxError) and attributes were assigned to ``snake_case_``
    instead of ``self``; ``model_type`` was lost to a throwaway class
    attribute.
    """

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 666 | 0 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used by the tokenizer tests below.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

# Language-code token ids in the "base" vocabulary.  Fix: these were all
# assigned to the same throwaway name `lowerCamelCase__`, while the test
# classes read EN_CODE / PYTHON_CODE.
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the slow PLBartTokenizer ("base" and "multi" language-code sets).

    Restored from a garbled state: all three methods shared one name (only the
    last survived), and ``_lowerCAmelCase`` stood in for a dozen distinct
    values (the fixture path, keep_accents=True, tokenize results, etc.).
    """

    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        # Out-of-vocabulary pieces round-trip to <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        # The last 4 ids are the "base" language codes plus <mask>.
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        # The last 7 ids are the full "multi" language-code set.
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
    """Integration tests for ``PLBartTokenizer`` against the
    ``uclanlp/plbart-python-en_XX`` checkpoint.

    NOTE(review): obfuscation collapsed every class-level fixture onto the
    single name ``lowercase_`` -- each assignment below overwrites the
    previous one, so only the last value actually survives at runtime.
    Presumably these were distinct attributes (checkpoint name, source
    texts, target texts, expected source token ids) in the original file;
    the methods read them as ``checkpoint_name`` / ``src_text`` /
    ``tgt_text`` / ``expected_src_tokens``.  The same collapse affects
    ``SCREAMING_SNAKE_CASE_`` (local results) and ``_lowerCAmelCase``
    (arguments) inside the methods, and ``PYTHON_CODE`` / ``EN_CODE`` are
    not defined in this fragment -- confirm against the upstream test file
    before running.
    """

    # Checkpoint the tokenizer under test is loaded from.
    lowercase_ = "uclanlp/plbart-python-en_XX"
    # Source-language (python) inputs.
    lowercase_ = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    # Target-language (en_XX) references.
    lowercase_ = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # Expected ids for the first source sentence: content tokens, then
    # EOS (id 2), then the PYTHON_CODE language id appended as suffix.
    lowercase_ = [
        134,
        5_452,
        33_460,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        988,
        20,
        33_456,
        19,
        33_456,
        771,
        39,
        4_258,
        889,
        3_318,
        33_441,
        33_463,
        33_465,
        33_463,
        33_449,
        2_471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def lowerCAmelCase_ ( cls : Optional[int] ):
        # Class-level setup: load the tokenizer once for all tests.
        # NOTE(review): the bare ``= 1`` below was presumably
        # ``cls.pad_token_id = 1`` before obfuscation -- verify upstream.
        SCREAMING_SNAKE_CASE_ = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX' )
        SCREAMING_SNAKE_CASE_ = 1
        return cls

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # Language-code special tokens sit directly after the base vocab.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 50_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 50_002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 50_003 )

    def lowerCAmelCase_ ( self : Any ):
        # Encoding the first source sentence reproduces the expected ids.
        SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )

    def lowerCAmelCase_ ( self : Any ):
        # Decoding with skip_special_tokens drops the language code and EOS.
        self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids )
        SCREAMING_SNAKE_CASE_ = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase )
        self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
        self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase )

    def lowerCAmelCase_ ( self : Dict ):
        # Truncation: a long input is cut to max_length while keeping the
        # final EOS (id 2) and trailing language-code token.
        SCREAMING_SNAKE_CASE_ = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
        self.assertIsInstance(src_text[0] , _lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = 10
        SCREAMING_SNAKE_CASE_ = self.tokenizer(_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , _lowerCAmelCase )
        self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # <mask> and the language codes resolve to their reserved ids.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [50_004, 50_001] )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # save_pretrained / from_pretrained round-trips the fairseq mapping.
        SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
        SCREAMING_SNAKE_CASE_ = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(_lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ = PLBartTokenizer.from_pretrained(_lowerCAmelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCAmelCase )

    @require_torch
    def lowerCAmelCase_ ( self : Dict ):
        # Batch layout: sources end with [EOS, PYTHON_CODE]; labels end with
        # [EOS, EN_CODE]; decoder inputs are the labels shifted right.
        SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , _lowerCAmelCase )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def lowerCAmelCase_ ( self : Any ):
        # Full encode with truncation to the known expected source length.
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
        SCREAMING_SNAKE_CASE_ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
        self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def lowerCAmelCase_ ( self : Any ):
        # Source and target sides honour their own max_length limits.
        SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=3 , return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ = self.tokenizer(
            text_target=self.tgt_text , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=10 , return_tensors='pt' )
        SCREAMING_SNAKE_CASE_ = targets['input_ids']
        SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def lowerCAmelCase_ ( self : List[str] ):
        # Translation inputs: source-language suffix on the input and a
        # forced target-language BOS for generation.
        SCREAMING_SNAKE_CASE_ = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
        self.assertEqual(
            nested_simplify(_lowerCAmelCase ) , {
                # A, test, EOS, en_XX
                'input_ids': [[150, 242, 2, 50_003]],
                'attention_mask': [[1, 1, 1, 1]],
                # java
                'forced_bos_token_id': 50_001,
            # NOTE(review): the trailing "| 31 | import math" on the next
            # line is dataset-row residue fused onto this line, kept as-is.
            } , ) | 31 | import math
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: int ):
    """Return the sorted list of all primes <= ``lowerCAmelCase_``.

    Uses a segmented sieve of Eratosthenes: a plain sieve over
    [2, sqrt(n)] collects the "base" primes, then the remaining range
    (sqrt(n), n] is processed segment by segment so only O(sqrt(n))
    memory is live at any time.

    Fixes vs. the original block:
    - appended the base prime itself, not the function argument, to the
      base-prime list (the old code appended ``lowerCAmelCase_``);
    - replaced ``math.floor(low / each) * each`` with exact integer
      arithmetic (``//``) so very large ``n`` cannot hit float rounding;
    - exposed the ``sieve`` alias that the module-level demo call uses
      (the original raised NameError there).
    """
    import math  # local: this file's import line was lost to dataset residue

    n = lowerCAmelCase_
    primes: list = []
    segment_size = int(math.sqrt(n))  # size of every segment
    # --- stage 1: plain sieve over [2, sqrt(n)] --------------------------
    is_candidate = [True] * (segment_size + 1)
    base_primes = []
    for value in range(2, segment_size + 1):
        if is_candidate[value]:
            base_primes.append(value)
            for multiple in range(value * value, segment_size + 1, value):
                is_candidate[multiple] = False
    primes += base_primes
    # --- stage 2: sieve each window [low, high] with the base primes -----
    low = segment_size + 1
    high = min(2 * segment_size, n)
    while low <= n:
        is_candidate = [True] * (high - low + 1)
        for prime in base_primes:
            # First multiple of `prime` that is >= low.
            first = (low // prime) * prime
            if first < low:
                first += prime
            for multiple in range(first, high + 1, prime):
                is_candidate[multiple - low] = False
        for offset in range(len(is_candidate)):
            if is_candidate[offset]:
                primes.append(offset + low)
        low = high + 1
        high = min(high + segment_size, n)
    return primes


# Backward-compatible alias: the demo call below uses this name.
sieve = SCREAMING_SNAKE_CASE_
# Demo: print every prime below one million when this module runs.
# NOTE(review): consider guarding with ``if __name__ == "__main__":`` so a
# plain import does not trigger this (fairly slow) computation.
print(sieve(1_0**6))
| 666 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
# Fixture modules for transformers.dynamic_module_utils.get_imports: every
# module imports only ``os`` unconditionally; any ``bar``/``baz`` imports
# are hidden inside function bodies or try/except blocks, which
# ``get_imports`` is expected to skip.
#
# Fix vs. original: all ten strings were assigned to the one name
# ``UpperCAmelCase_`` while the CASES list referenced ten never-defined
# names, so importing the module raised NameError.  Each fixture now gets
# the name the list expects.
TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = (
    "\ndef foo():\n    def bar():\n        if True:\n            import os\n"
    "        return False\n    return bar()\n"
)
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = (
    "\nimport os\n\ndef foo():\n    try:\n        import bar\n"
    "    except ImportError:\n        raise ValueError()\n"
)
MULTIPLE_EXCEPTS_IMPORT = (
    "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
)
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = (
    "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
)
MULTILINE_BOTH_IMPORT = (
    "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n"
    "    x = 1\n    raise ValueError()\n"
)

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]

# Legacy aliases so the obfuscated names used elsewhere in this file keep
# resolving (the parametrize decorator below referenced the second one).
UpperCAmelCase_ = CASES
SCREAMING_SNAKE_CASE_ = CASES
@pytest.mark.parametrize("case", CASES)
def A__(case: str, tmp_path) -> None:
    """Check that ``get_imports`` reports only unconditional top-level imports.

    ``case`` is one of the fixture modules above: each imports ``os``
    unconditionally, while ``bar``/``baz`` imports are nested in function
    bodies or guarded by ``try``/``except`` and must be ignored.

    ``tmp_path`` is pytest's per-test temporary directory fixture.

    Fixes vs. original: both parameters were declared under the same name
    (a SyntaxError), the parametrize list referenced an undefined name, and
    the path / written content / parsed result were all bound to one name.
    """
    # get_imports reads from disk, so materialise the fixture as a file.
    module_path = os.path.join(tmp_path, "test_file.py")
    with open(module_path, "w") as tmp_file:
        tmp_file.write(case)
    parsed_imports = get_imports(module_path)
    assert parsed_imports == ["os"]


import json  # restored: the dataset residue shows this import opened the next concatenated file
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class snake_case__ ( unittest.TestCase ):
    """Builds ConditionalDetr image-processor configs and computes the
    expected post-resize image sizes for the tests below.

    NOTE(review): obfuscation damage in this class -- ``__init__`` declares
    every parameter under the same name ``A__`` (a SyntaxError) and the
    tuple unpackings in ``get_expected_values`` were collapsed onto
    ``snake_case_`` with a type annotation (invalid Python for a tuple
    target; presumably ``w, h = image.size`` etc. upstream).  Restore from
    the upstream tester before running.
    """

    def __init__( self : List[str] , A__ : List[Any] , A__ : int=7 , A__ : Union[str, Any]=3 , A__ : List[str]=30 , A__ : Optional[int]=4_00 , A__ : Optional[Any]=True , A__ : Optional[int]=None , A__ : Optional[Any]=True , A__ : Any=[0.5, 0.5, 0.5] , A__ : int=[0.5, 0.5, 0.5] , A__ : Any=True , A__ : int=1 / 2_55 , A__ : List[str]=True , ) -> Dict:
        """Store the image-processor configuration shared by every test."""
        # Default size mirrors the processor's own default when none given.
        snake_case_ : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
        snake_case_ : Any = parent
        snake_case_ : Optional[int] = batch_size
        snake_case_ : List[Any] = num_channels
        snake_case_ : Union[str, Any] = min_resolution
        snake_case_ : List[Any] = max_resolution
        snake_case_ : Tuple = do_resize
        snake_case_ : Dict = size
        snake_case_ : Optional[Any] = do_normalize
        snake_case_ : int = image_mean
        snake_case_ : List[Any] = image_std
        snake_case_ : Tuple = do_rescale
        snake_case_ : Any = rescale_factor
        snake_case_ : Optional[int] = do_pad

    def UpperCAmelCase__ ( self : int ) -> List[str]:
        """Return the kwargs dict used to instantiate the processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def UpperCAmelCase__ ( self : Optional[int] , A__ : Optional[int] , A__ : Any=False ) -> Optional[Any]:
        """Compute the (height, width) the processor should resize to.

        Mirrors shortest-edge/longest-edge resizing for a single image;
        with ``batched=True`` the per-image sizes are reduced to the batch
        maximum along each axis (the padded batch shape).
        """
        if not batched:
            snake_case_ : Any = image_inputs[0]
            if isinstance(A__ , Image.Image ):
                # PIL reports (width, height).
                snake_case_ ,snake_case_ : Dict = image.size
            else:
                # Array inputs are channels-first: dims 1 and 2 are H, W.
                snake_case_ ,snake_case_ : int = image.shape[1], image.shape[2]
            # Scale so the shorter side hits ``shortest_edge``.
            if w < h:
                snake_case_ : Dict = int(self.size["shortest_edge"] * h / w )
                snake_case_ : Optional[int] = self.size["shortest_edge"]
            elif w > h:
                snake_case_ : Optional[int] = self.size["shortest_edge"]
                snake_case_ : str = int(self.size["shortest_edge"] * w / h )
            else:
                snake_case_ : Optional[int] = self.size["shortest_edge"]
                snake_case_ : List[Any] = self.size["shortest_edge"]
        else:
            # Batched: expected shape is the max over per-image expectations.
            snake_case_ : str = []
            for image in image_inputs:
                snake_case_ ,snake_case_ : Tuple = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            snake_case_ : List[Any] = max(A__ , key=lambda A__ : item[0] )[0]
            snake_case_ : int = max(A__ , key=lambda A__ : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class snake_case__ ( _UpperCamelCase , unittest.TestCase ):
    """Tests for ``ConditionalDetrImageProcessor``.

    NOTE(review): ``_UpperCamelCase`` is undefined in this fragment
    (presumably the shared ``ImageProcessingSavingTestMixin``); the tester
    is instantiated under the name ``ConditionalDetrImageProcessingTester``
    which this fragment never defines; and results are bound to the
    throwaway name ``snake_case_`` while assertions read names such as
    ``encoded_images``/``image_processor`` -- all obfuscation artifacts.
    Confirm against the upstream test file before running.
    """

    # Class under test; None when the vision extras are unavailable.
    _SCREAMING_SNAKE_CASE : Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None

    def UpperCAmelCase__ ( self : Tuple ) -> Dict:
        """Build the shared tester fixture."""
        snake_case_ : List[str] = ConditionalDetrImageProcessingTester(self )

    @property
    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """Image-processor kwargs derived from the tester configuration."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase__ ( self : Any ) -> Tuple:
        """The processor exposes the expected configuration attributes."""
        snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , "image_mean" ) )
        self.assertTrue(hasattr(A__ , "image_std" ) )
        self.assertTrue(hasattr(A__ , "do_normalize" ) )
        self.assertTrue(hasattr(A__ , "do_resize" ) )
        self.assertTrue(hasattr(A__ , "size" ) )

    def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
        """``from_dict`` honours defaults and keyword overrides."""
        snake_case_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , A__ )
        # Legacy kwargs (max_size / pad_and_return_pixel_mask) still map
        # onto the new-style size dict and do_pad flag.
        snake_case_ : Optional[int] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A__ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , A__ )

    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        """Intentionally empty placeholder (kept for mixin compatibility)."""
        pass

    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """PIL inputs: single image and batch resize to expected shapes."""
        snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        snake_case_ : int = image_processing(A__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : int ) -> Any:
        """NumPy inputs behave identically to PIL inputs."""
        snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case_ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[str] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Optional[int] = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : Dict = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def UpperCAmelCase__ ( self : Tuple ) -> str:
        """Torch tensor inputs behave identically to PIL inputs."""
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(A__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Any = image_processing(A__ , return_tensors="pt" ).pixel_values
        snake_case_ ,snake_case_ : int = self.image_processor_tester.get_expected_values(A__ , batched=A__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        """Integration: COCO detection annotations encode to known values."""
        snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case_ : Optional[Any] = json.loads(f.read() )
        snake_case_ : int = {"image_id": 3_97_69, "annotations": target}
        # encode them
        snake_case_ : Optional[int] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        snake_case_ : Any = image_processing(images=A__ , annotations=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : List[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : Any = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[Any] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify orig_size
        snake_case_ : Any = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : List[str] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )

    @slow
    def UpperCAmelCase__ ( self : int ) -> str:
        """Integration: COCO panoptic annotations (with masks) encode to known values."""
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Any = json.loads(f.read() )
        snake_case_ : Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
        snake_case_ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : Union[str, Any] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case_ : str = image_processing(images=A__ , annotations=A__ , masks_path=A__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : int = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["pixel_values"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Optional[int] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A__ ) )
        # verify boxes
        snake_case_ : str = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , A__ )
        snake_case_ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A__ ) )
        # verify is_crowd
        snake_case_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A__ ) )
        # verify class_labels
        snake_case_ : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A__ ) )
        # verify masks
        snake_case_ : Union[str, Any] = 82_28_73
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A__ )
        # verify orig_size
        snake_case_ : Dict = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A__ ) )
        # verify size
        snake_case_ : str = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A__ ) )
| 666 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazily-imported public structure of the RAG sub-package.
#
# Fixes vs. original: the dict was bound to ``lowerCamelCase__`` while
# ``_LazyModule`` below referenced the never-defined ``_import_structure``
# (NameError at import); the backend class lists were assigned to a
# throwaway name instead of being registered as keys of the structure (so
# the lazy module would never expose the model classes); and the lazy
# module was never installed into ``sys.modules``.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

# PyTorch model classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

# Same for the TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    # ... while at runtime the module is replaced by a lazy proxy that
    # imports submodules on first attribute access.
    import sys

    lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = lowerCamelCase__

import os  # restored: the dataset residue shows this import opened the next concatenated file
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module-level logger.  Fixes vs. original: all three values were bound to
# the single name ``UpperCAmelCase``, leaving ``logger`` (used by the
# dataset class below) and ``MODEL_CONFIG_CLASSES`` (referenced on the
# next line) undefined -- NameError at import time.
UpperCAmelCase = logging.get_logger(__name__)
logger = UpperCAmelCase  # canonical name used throughout this module

# Configs that support question answering, and the model-type strings
# offered in SquadDataTrainingArguments' help text.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class snake_case__ :
    """Command-line arguments controlling SQuAD preprocessing and caching.

    NOTE(review): obfuscation assigned every field to the same name
    ``_SCREAMING_SNAKE_CASE``, so as written only the last declaration
    survives.  The per-field comments below record the intended names
    (model_type, data_dir, max_seq_length, doc_stride, max_query_length,
    max_answer_length, overwrite_cache, version_2_with_negative,
    null_score_diff_threshold, n_best_size, lang_id, threads), which the
    dataset class below reads via ``args.<name>`` -- restore from the
    upstream file before use.
    """

    # model_type -- one of the supported QA model types.
    # NOTE(review): ``_UpperCamelCase`` (default and join target) is
    # undefined in this fragment; upstream joins MODEL_TYPES.
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "Model type selected in the list: " + ", ".join(_UpperCamelCase )} )
    # data_dir -- directory holding the SQuAD .json files.
    _SCREAMING_SNAKE_CASE : str = field(
        default=_UpperCamelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    # max_seq_length -- hard cap on the tokenized input length.
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    # doc_stride -- overlap between successive document chunks.
    _SCREAMING_SNAKE_CASE : int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    # max_query_length -- cap on the question length.
    _SCREAMING_SNAKE_CASE : int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    # max_answer_length -- cap on generated answer spans.
    _SCREAMING_SNAKE_CASE : int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    # overwrite_cache -- rebuild cached features even when present.
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    # version_2_with_negative -- SQuAD v2 style (unanswerable questions).
    _SCREAMING_SNAKE_CASE : bool = field(
        default=_UpperCamelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    # null_score_diff_threshold -- predict "no answer" above this margin.
    _SCREAMING_SNAKE_CASE : float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    # n_best_size -- NOTE(review): the help text appears copy-pasted from
    # the field above; upstream this describes the n-best list size.
    _SCREAMING_SNAKE_CASE : int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    # lang_id -- language id for language-sensitive XLM models.
    _SCREAMING_SNAKE_CASE : int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    # threads -- worker count for example-to-feature conversion.
    _SCREAMING_SNAKE_CASE : int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class snake_case__ ( _UpperCamelCase ):
    """Dataset split selector -- presumably an ``Enum`` upstream (the base
    class name is obfuscated and undefined in this fragment).

    NOTE(review): both members are bound to the same obfuscated name, so
    only the second survives as written; upstream these are
    ``train = "train"`` and ``dev = "dev"``.
    """

    _SCREAMING_SNAKE_CASE : Tuple = "train"
    _SCREAMING_SNAKE_CASE : Any = "dev"
class snake_case__ ( _UpperCamelCase ):
    """Torch ``Dataset`` of SQuAD features with transparent on-disk caching.

    NOTE(review): ``_UpperCamelCase`` (presumably ``torch.utils.data.Dataset``)
    is undefined in this fragment, and the obfuscated assignment targets
    (``snake_case_``) no longer match the names later reads use
    (``mode``, ``cached_features_file``, ``start`` ...).  Restore from the
    upstream ``squad.py`` before running.
    """

    # Class-level annotations -- upstream: args / features / mode /
    # is_language_sensitive (all collapsed onto one name here).
    _SCREAMING_SNAKE_CASE : SquadDataTrainingArguments
    _SCREAMING_SNAKE_CASE : List[SquadFeatures]
    _SCREAMING_SNAKE_CASE : Split
    _SCREAMING_SNAKE_CASE : bool

    def __init__( self : str , A__ : SquadDataTrainingArguments , A__ : PreTrainedTokenizer , A__ : Optional[int] = None , A__ : Union[str, Split] = Split.train , A__ : Optional[bool] = False , A__ : Optional[str] = None , A__ : Optional[str] = "pt" , ) -> Optional[Any]:
        """Load features from the cache file or build them from raw examples."""
        snake_case_ : Tuple = args
        snake_case_ : int = is_language_sensitive
        # NOTE(review): both ternary branches construct the same class.
        # Upstream this is ``SquadV2Processor() if
        # args.version_2_with_negative else SquadV1Processor()`` -- the
        # garbled module import also lists ``SquadVaProcessor`` twice.
        snake_case_ : int = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        # Accept a split name as a string as well as a Split member.
        if isinstance(A__ , A__ ):
            try:
                snake_case_ : List[str] = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        snake_case_ : Tuple = mode
        # Load data features from cache or dataset file
        snake_case_ : Dict = "v2" if args.version_2_with_negative else "v1"
        snake_case_ : List[Any] = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        snake_case_ : List[Any] = cached_features_file + ".lock"
        with FileLock(A__ ):
            if os.path.exists(A__ ) and not args.overwrite_cache:
                snake_case_ : int = time.time()
                snake_case_ : List[Any] = torch.load(A__ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                snake_case_ : Tuple = self.old_features["features"]
                snake_case_ : List[str] = self.old_features.get("dataset" , A__ )
                snake_case_ : Tuple = self.old_features.get("examples" , A__ )
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run" )
            else:
                # Cache miss (or forced rebuild): read the raw examples ...
                if mode == Split.dev:
                    snake_case_ : Tuple = self.processor.get_dev_examples(args.data_dir )
                else:
                    snake_case_ : Tuple = self.processor.get_train_examples(args.data_dir )
                # ... convert them to features, then persist everything.
                snake_case_ ,snake_case_ : Optional[Any] = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=A__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=A__ , )
                snake_case_ : Any = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , A__ , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__( self : str ) -> Dict:
        """Number of cached features."""
        return len(self.features )

    def __getitem__( self : Optional[int] , A__ : Optional[int] ) -> Dict[str, torch.Tensor]:
        """Convert the i-th ``SquadFeatures`` into a dict of model tensors."""
        snake_case_ : Any = self.features[i]
        snake_case_ : Optional[int] = torch.tensor(feature.input_ids , dtype=torch.long )
        snake_case_ : Union[str, Any] = torch.tensor(feature.attention_mask , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.token_type_ids , dtype=torch.long )
        snake_case_ : List[Any] = torch.tensor(feature.cls_index , dtype=torch.long )
        snake_case_ : str = torch.tensor(feature.p_mask , dtype=torch.float )
        snake_case_ : str = torch.tensor(feature.is_impossible , dtype=torch.float )
        snake_case_ : Optional[int] = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        # Models that do not use token_type_ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        # XLNet/XLM take extra inputs for impossible-answer handling.
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible} )
            if self.is_language_sensitive:
                # NOTE(review): ``torch.intaa`` is a garbled dtype
                # (upstream: ``torch.int64``) -- restore before use.
                inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        # Training additionally needs the gold answer span positions.
        if self.mode == Split.train:
            snake_case_ : Any = torch.tensor(feature.start_position , dtype=torch.long )
            snake_case_ : List[Any] = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
| 666 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
    """Audio feature extractor that turns raw mono speech into padded
    log-mel spectrograms plus an attention mask over spectrogram patches.

    NOTE(review): this file is machine-obfuscated — every ``__init__``
    parameter was renamed to the same identifier and assignment targets were
    collapsed to ``UpperCamelCase`` — so the code does not run as written.
    Comments below describe the evident original intent; code is unchanged.
    """

    # Keys present in the returned BatchFeature.
    A_ = ['''audio_values''', '''audio_mask''']

    def __init__( self , lowerCamelCase_=2_0_4_8 , lowerCamelCase_=1 , lowerCamelCase_=[1_6, 1_6] , lowerCamelCase_=1_2_8 , lowerCamelCase_=4_4_1_0_0 , lowerCamelCase_=8_6 , lowerCamelCase_=2_0_4_8 , lowerCamelCase_=0.0 , **lowerCamelCase_ , ) -> Dict:
        # Defaults, in original order: spectrogram_length=2048, num_channels=1,
        # patch_size=[16, 16], feature_size=128 (mel bins), sampling_rate=44100,
        # hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0.
        super().__init__(
            feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ , )
        UpperCamelCase = spectrogram_length
        UpperCamelCase = num_channels
        UpperCamelCase = patch_size
        # Number of frequency patches per time frame.
        UpperCamelCase = feature_size // self.patch_size[1]
        UpperCamelCase = n_fft
        UpperCamelCase = sampling_rate // hop_length_to_sampling_rate
        UpperCamelCase = sampling_rate
        UpperCamelCase = padding_value
        # Slaney-normalised mel filter bank, transposed for matmul with the STFT.
        UpperCamelCase = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCamelCase_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , ).T

    def UpperCAmelCase__ ( self , lowerCamelCase_) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram rescaled to roughly [-1, 1]."""
        UpperCamelCase = spectrogram(
            lowerCamelCase_ , window_function(self.n_fft , '''hann''') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        UpperCamelCase = log_spec[:, :-1]  # drop the trailing frame
        UpperCamelCase = log_spec - 20.0  # shift the dB range
        UpperCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0) + 1.0  # squash into [-1, 1]
        return log_spec

    def __call__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = False , lowerCamelCase_ = False , **lowerCamelCase_ , ) -> BatchFeature:
        """Featurize one waveform or a batch of mono waveforms.

        Returns a BatchFeature with ``audio_values`` (padded log-mel
        spectrograms shaped for patch extraction) and, when requested,
        ``audio_mask`` (1 = real patch, 0 = padding).
        """
        # Guard: the input sampling rate must match what the extractor expects.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    F' with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        # Normalise accepted shapes: mono only; detect batched ndarray input.
        UpperCamelCase = isinstance(lowerCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}')
        UpperCamelCase = is_batched_numpy or (
            isinstance(lowerCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
        )
        if is_batched:
            UpperCamelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray):
            UpperCamelCase = np.asarray(lowerCamelCase_ , dtype=np.floataa)
        elif isinstance(lowerCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            UpperCamelCase = raw_speech.astype(np.floataa)
        # always return batch
        if not is_batched:
            UpperCamelCase = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        UpperCamelCase = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , lowerCamelCase_):
            UpperCamelCase = [np.asarray(lowerCamelCase_ , dtype=np.floataa) for feature in audio_features]
        # Create audio attention mask
        UpperCamelCase = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for every real patch, 0 for every padded patch, per example.
            UpperCamelCase = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            UpperCamelCase = np.array(lowerCamelCase_).astype(np.floataa)
        # convert into correct format for padding
        UpperCamelCase = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        # Pre-fill the output tensor with the padding value, then copy each
        # example's spectrogram into its slot.
        UpperCamelCase = np.ones([len(lowerCamelCase_), 1, max_time_len, self.feature_size]).astype(np.floataa)
        UpperCamelCase = padded_audio_features * self.padding_value
        for i in range(len(lowerCamelCase_)):
            UpperCamelCase = audio_features[i]
            UpperCamelCase = feature
        # return as BatchFeature
        if return_attention_mask:
            UpperCamelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            UpperCamelCase = {'''audio_values''': padded_audio_features}
        UpperCamelCase = BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_)
return encoded_inputs | 34 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class snake_case__ ( _UpperCamelCase ):
    """Configuration for the vision (ViT-style) encoder of a GIT model.

    NOTE(review): obfuscation collapsed all ``__init__`` parameters to ``A__``
    and assignment targets to ``snake_case_``. The defaults, in original
    order, are: hidden_size=768, intermediate_size=3072,
    num_hidden_layers=12, num_attention_heads=12, num_channels=3,
    image_size=224, patch_size=16, hidden_act="quick_gelu",
    layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02.
    """

    # model_type identifier used for AutoConfig dispatch.
    _SCREAMING_SNAKE_CASE : Dict = "git_vision_model"

    def __init__( self : int , A__ : Union[str, Any]=7_68 , A__ : List[Any]=30_72 , A__ : Tuple=12 , A__ : Optional[Any]=12 , A__ : Optional[int]=3 , A__ : List[str]=2_24 , A__ : Dict=16 , A__ : int="quick_gelu" , A__ : Any=1E-5 , A__ : Tuple=0.0 , A__ : Optional[int]=0.02 , **A__ : List[str] , ) -> Optional[int]:
        """Store the vision-encoder hyper-parameters."""
        super().__init__(**A__ )
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : str = intermediate_size
        snake_case_ : Optional[Any] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : Optional[int] = num_channels
        snake_case_ : Union[str, Any] = patch_size
        snake_case_ : List[str] = image_size
        snake_case_ : List[Any] = initializer_range
        snake_case_ : Any = attention_dropout
        snake_case_ : Any = layer_norm_eps
        snake_case_ : int = hidden_act

    @classmethod
    def UpperCAmelCase__ ( cls : List[Any] , A__ : Union[str, os.PathLike] , **A__ : Optional[int] ) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full GIT config when needed."""
        cls._set_token_in_kwargs(A__ )
        snake_case_ ,snake_case_ : Tuple = cls.get_config_dict(A__ , **A__ )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            snake_case_ : Any = config_dict["vision_config"]
        # Warn (but proceed) when instantiating from a mismatching model_type.
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(A__ , **A__ )
class snake_case__ ( _UpperCamelCase ):
    """Configuration for the full GIT (GenerativeImage2Text) model: a text
    decoder on top of a GIT vision encoder.

    NOTE(review): obfuscation collapsed all ``__init__`` parameters to ``A__``
    and assignment targets to ``snake_case_``. The original parameter order is:
    vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6,
    num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
    hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
    max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12,
    pad_token_id=0, position_embedding_type="absolute", use_cache=True,
    tie_word_embeddings=False, bos_token_id=101, eos_token_id=102,
    num_image_with_embedding=None.
    """

    # model_type identifier used for AutoConfig dispatch.
    _SCREAMING_SNAKE_CASE : Optional[Any] = "git"

    def __init__( self : Any , A__ : List[str]=None , A__ : List[str]=3_05_22 , A__ : Tuple=7_68 , A__ : Tuple=6 , A__ : str=12 , A__ : Any=30_72 , A__ : List[str]="gelu" , A__ : int=0.1 , A__ : Dict=0.1 , A__ : Any=10_24 , A__ : Optional[Any]=0.02 , A__ : Optional[Any]=1E-12 , A__ : Dict=0 , A__ : Any="absolute" , A__ : Tuple=True , A__ : Any=False , A__ : Tuple=1_01 , A__ : Tuple=1_02 , A__ : List[Any]=None , **A__ : List[str] , ) -> int:
        """Store decoder hyper-parameters and build the nested vision config."""
        super().__init__(bos_token_id=A__ , eos_token_id=A__ , pad_token_id=A__ , **A__ )
        # Fall back to a default GitVisionConfig when none is supplied.
        if vision_config is None:
            snake_case_ : int = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        snake_case_ : str = GitVisionConfig(**A__ )
        snake_case_ : int = vocab_size
        snake_case_ : List[Any] = hidden_size
        snake_case_ : Tuple = num_hidden_layers
        snake_case_ : List[Any] = num_attention_heads
        snake_case_ : Any = hidden_act
        snake_case_ : Dict = intermediate_size
        snake_case_ : Any = hidden_dropout_prob
        snake_case_ : Any = attention_probs_dropout_prob
        snake_case_ : Union[str, Any] = max_position_embeddings
        snake_case_ : List[str] = initializer_range
        snake_case_ : List[str] = layer_norm_eps
        snake_case_ : Any = position_embedding_type
        snake_case_ : Union[str, Any] = use_cache
        snake_case_ : str = tie_word_embeddings
        snake_case_ : List[Any] = num_image_with_embedding
        snake_case_ : Dict = bos_token_id
        snake_case_ : int = eos_token_id

    def UpperCAmelCase__ ( self : Any ) -> int:
        """Serialise to a plain dict, expanding the nested vision config."""
        snake_case_ : Tuple = copy.deepcopy(self.__dict__ )
        snake_case_ : Optional[int] = self.vision_config.to_dict()
        snake_case_ : Tuple = self.__class__.model_type
        return output
| 666 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase ( _UpperCAmelCase ):
    """Dataset reader that builds a Dataset (or streaming IterableDataset)
    from JSON / JSON Lines files.

    NOTE(review): obfuscation renamed every parameter to ``_lowercase`` and
    every assignment target to ``SCREAMING_SNAKE_CASE__``; the original
    parameter order matches AbstractDatasetReader plus a JSON ``field``:
    (path_or_paths, split=None, features=None, cache_dir=None,
    keep_in_memory=False, streaming=False, field=None, num_proc=None).
    """

    def __init__( self : Optional[Any] , _lowercase : NestedDataStructureLike[PathLike] , _lowercase : Optional[NamedSplit] = None , _lowercase : Optional[Features] = None , _lowercase : str = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : Optional[str] = None , _lowercase : Optional[int] = None , **_lowercase : str , ):
        super().__init__(
            _lowercase , split=_lowercase , features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , )
        SCREAMING_SNAKE_CASE__ : Tuple = field
        # Accept a bare path (mapped onto the requested split) or a dict of splits.
        SCREAMING_SNAKE_CASE__ : List[Any] = path_or_paths if isinstance(_lowercase , _lowercase ) else {self.split: path_or_paths}
        SCREAMING_SNAKE_CASE__ : Any = Json(
            cache_dir=_lowercase , data_files=_lowercase , features=_lowercase , field=_lowercase , **_lowercase , )

    def lowercase__ ( self : int ):
        """Materialise the dataset: a streaming view, or a fully-prepared
        map-style dataset after download_and_prepare."""
        # Build iterable dataset
        if self.streaming:
            SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            # These four are the (download_config, download_mode,
            # verification_mode, base_path) arguments, all left at None.
            SCREAMING_SNAKE_CASE__ : Any = None
            SCREAMING_SNAKE_CASE__ : List[Any] = None
            SCREAMING_SNAKE_CASE__ : List[Any] = None
            SCREAMING_SNAKE_CASE__ : List[Any] = None
            self.builder.download_and_prepare(
                download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , )
            SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_dataset(
                split=self.split , verification_mode=_lowercase , in_memory=self.keep_in_memory )
        return dataset
class lowercase :
    """Serialise a Dataset to JSON (JSON Lines by default), optionally
    sharding the work across a multiprocessing pool.

    NOTE(review): obfuscation renamed all parameters to ``_lowercase`` and
    all assignment targets to ``SCREAMING_SNAKE_CASE__``; the original
    constructor order is (dataset, path_or_buf, batch_size=None,
    num_proc=None, **to_json_kwargs).
    """

    def __init__( self : List[str] , _lowercase : Dataset , _lowercase : Union[PathLike, BinaryIO] , _lowercase : Optional[int] = None , _lowercase : Optional[int] = None , **_lowercase : List[Any] , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        SCREAMING_SNAKE_CASE__ : List[str] = dataset
        SCREAMING_SNAKE_CASE__ : Tuple = path_or_buf
        SCREAMING_SNAKE_CASE__ : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        SCREAMING_SNAKE_CASE__ : Dict = num_proc
        SCREAMING_SNAKE_CASE__ : Any = '''utf-8'''
        SCREAMING_SNAKE_CASE__ : int = to_json_kwargs

    def lowercase__ ( self : List[str] ):
        """Write the whole dataset and return the number of bytes written."""
        SCREAMING_SNAKE_CASE__ : Tuple = self.to_json_kwargs.pop('''path_or_buf''' , _lowercase )
        # JSON Lines defaults: records orient, one object per line, no index.
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.to_json_kwargs.pop('''orient''' , '''records''' )
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        SCREAMING_SNAKE_CASE__ : List[Any] = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        SCREAMING_SNAKE_CASE__ : str = self.to_json_kwargs.pop('''compression''' , _lowercase )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , '''wb''' , compression=_lowercase ) as buffer:
                SCREAMING_SNAKE_CASE__ : Tuple = self._write(file_obj=_lowercase , orient=_lowercase , lines=_lowercase , index=_lowercase , **self.to_json_kwargs )
        else:
            # Compression needs a real file path, not an in-memory buffer.
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    ''' was passed. Please provide a local path instead.''' )
            SCREAMING_SNAKE_CASE__ : int = self._write(
                file_obj=self.path_or_buf , orient=_lowercase , lines=_lowercase , index=_lowercase , **self.to_json_kwargs )
        return written

    def lowercase__ ( self : Dict , _lowercase : Optional[Any] ):
        """Render one batch of rows to an encoded, newline-terminated JSON string."""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = args
        SCREAMING_SNAKE_CASE__ : Tuple = query_table(
            table=self.dataset.data , key=slice(_lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
        SCREAMING_SNAKE_CASE__ : List[Any] = batch.to_pandas().to_json(
            path_or_buf=_lowercase , orient=_lowercase , lines=_lowercase , index=_lowercase , **_lowercase )
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def lowercase__ ( self : Union[str, Any] , _lowercase : BinaryIO , _lowercase : Dict , _lowercase : str , _lowercase : Any , **_lowercase : Union[str, Any] , ):
        """Stream batches into ``file_obj``, serially or via a process pool;
        return total bytes written."""
        SCREAMING_SNAKE_CASE__ : Dict = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                SCREAMING_SNAKE_CASE__ : List[Any] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(_lowercase )
        else:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = len(self.dataset ), self.batch_size
            # imap preserves batch order, so the output file stays in row order.
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _lowercase , _lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(_lowercase )
        return written
def jaro_winkler(stra: str, strb: str) -> float:
    """Return the Jaro–Winkler similarity of two strings, in [0.0, 1.0].

    Fixes obfuscation damage in the original: both parameters and both
    loop variables had been collapsed to single names, so the transposition
    count was always zero and matches were computed against the wrong string
    (and the duplicated parameter names were a SyntaxError).

    >>> jaro_winkler("martha", "marhta")
    0.9611111111111111
    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that also occur in _strb within the Jaro
        # matching window (half the shorter length), each usable once.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                # Blank out the consumed character so it cannot match twice.
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transpositions: matched characters appearing in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters earns the Winkler bonus
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


# Keep the obfuscated alias so any existing reference keeps working.
SCREAMING_SNAKE_CASE_ = jaro_winkler

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
| 666 | 0 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc of ``radius`` spanning ``angle`` degrees.

    Fixes obfuscation damage: both parameters had been renamed to the same
    identifier (a SyntaxError) while the body still used ``radius``/``angle``,
    and the def name no longer matched the ``arc_length`` call below.
    """
    # The arc is the (angle / 360) fraction of the full circumference 2*pi*r.
    return 2 * pi * radius * (angle / 360)


# Keep the obfuscated alias so any existing reference keeps working.
lowercase = arc_length

if __name__ == "__main__":
    print(arc_length(90, 10))
| 36 | import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): obfuscation collapsed every module-level name below to
# `UpperCAmelCase`. From their later uses in this file, the originals are,
# in order: logger, new_layer_name_dict, REMOTE_MODEL_PATHS, CUR_PATH,
# default_cache_dir, CACHE_DIR.
UpperCAmelCase = logging.get_logger(__name__)
# Fixed seed so the conversion sanity-check forward passes are reproducible.
set_seed(7_7_0)
# Mapping from original Bark (nanoGPT-style) parameter-name fragments to the
# corresponding HF Bark layer names, applied while fixing up state dicts.
UpperCAmelCase = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}
# Hub location of each original suno/bark checkpoint, per sub-model and size.
UpperCAmelCase = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}
# Local cache layout for downloaded original checkpoints
# (honours XDG_CACHE_HOME, defaulting to ~/.cache/suno/bark_v0).
UpperCAmelCase = os.path.dirname(os.path.abspath(__file__))
UpperCAmelCase = os.path.join(os.path.expanduser("~"), ".cache")
UpperCAmelCase = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path for an original Bark checkpoint.

    Fixes obfuscation damage: both parameters had been collapsed to one name
    (a SyntaxError) and the join used the model-type argument where the
    module-level CACHE_DIR belongs. The def is renamed to `_get_ckpt_path`,
    the name the rest of this file already calls.

    Args:
        model_type: "text", "coarse" or "fine".
        use_small: select the "<model_type>_small" checkpoint variant.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


# Keep the obfuscated alias so any existing reference keeps working.
SCREAMING_SNAKE_CASE_ = _get_ckpt_path
def _download(from_hf_path, file_name):
    """Download one original Bark checkpoint file into the local CACHE_DIR.

    Fixes obfuscation damage: both parameters had been collapsed to one name
    (a SyntaxError) and the makedirs/local_dir target is the module-level
    CACHE_DIR, not an argument. Renamed to `_download`, the name the rest of
    this file already calls.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


# Keep the obfuscated alias so any existing reference keeps working.
SCREAMING_SNAKE_CASE_ = _download
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load an original suno/bark sub-model checkpoint into the matching
    transformers Bark model.

    Fixes obfuscation damage (duplicate parameter names, collapsed
    assignment targets); names are restored from their uses in the original
    body. Renamed to `_load_model`, the name `load_model` below already calls.

    Args:
        ckpt_path: path to the original .pt checkpoint (downloaded if absent).
        device: torch device for loading and for the returned model.
        use_small: load the "small" checkpoint variant.
        model_type: one of "text", "coarse" or "fine".

    Returns:
        The converted HF model, in eval mode, moved to `device`.
    """
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: old checkpoints expose a single `vocab_size`; the HF
    # configs want separate input/output vocab sizes.
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint: strip the torch.compile prefix and remap layer names
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    # `.attn.bias` buffers are causal masks the HF model rebuilds itself,
    # so they are excluded from the key-consistency checks.
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model


# Keep the obfuscated alias so any existing reference keeps working.
SCREAMING_SNAKE_CASE_ = _load_model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, verify it against the original suno/bark
    implementation, and save it with save_pretrained.

    Fixes obfuscation damage (duplicate parameter names, collapsed targets);
    names restored from the original body's uses. Renamed to `load_model`,
    the name the `__main__` block below already calls.

    Raises:
        ValueError: if parameter counts, output shapes, or outputs (beyond a
            1e-3 tolerance) disagree between the two implementations.
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        # The fine model predicts one codebook channel from the full code stack.
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


# Keep the obfuscated alias so any existing reference keeps working.
SCREAMING_SNAKE_CASE_ = load_model
def SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_: Tuple , lowerCAmelCase_: List[str] , lowerCAmelCase_: Any , lowerCAmelCase_: List[Any] , lowerCAmelCase_: int , lowerCAmelCase_: Optional[Any] , ):
    """Assemble a full BarkModel from separately converted sub-models and
    save/push it to the Hub.

    NOTE(review): obfuscation collapsed all six parameters to
    ``lowerCAmelCase_`` (a SyntaxError) and assignment targets to
    ``snake_case_``; the evident original parameters are
    (semantic_path, coarse_path, fine_path, append_text, hub_path,
    folder_path) — confirm against the upstream conversion script.
    """
    # Output directory: <folder_path>/<append_text>.
    snake_case_ : Optional[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
    # Sub-model configs are read from each converted checkpoint directory.
    snake_case_ : Optional[Any] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
    snake_case_ : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
    snake_case_ : List[str] = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase_ , "config.json" ) )
    snake_case_ : List[Any] = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    # Load the three converted sub-models plus the pretrained EnCodec codec.
    snake_case_ : List[str] = BarkSemanticModel.from_pretrained(lowerCAmelCase_ )
    snake_case_ : Optional[Any] = BarkCoarseModel.from_pretrained(lowerCAmelCase_ )
    snake_case_ : Tuple = BarkFineModel.from_pretrained(lowerCAmelCase_ )
    snake_case_ : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    # Compose the top-level config and generation config from the sub-configs.
    snake_case_ : Tuple = BarkConfig.from_sub_model_configs(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    snake_case_ : List[Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    # Build the composite model and attach the loaded sub-modules.
    snake_case_ : Optional[int] = BarkModel(lowerCAmelCase_ )
    snake_case_ : int = semantic
    snake_case_ : List[str] = coarseAcoustic
    snake_case_ : str = fineAcoustic
    snake_case_ : Optional[Any] = codec
    snake_case_ : Any = bark_generation_config
    Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
    bark.save_pretrained(lowerCAmelCase_ , repo_id=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ )
if __name__ == "__main__":
    # CLI entry point: convert one Bark sub-model (text / coarse / fine)
    # into the transformers format under `pytorch_dump_folder_path`.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    UpperCAmelCase = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 666 | 0 |
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Project Euler 86: smallest cuboid size M such that the number of
    cuboids (up to M x M x M) with an integer shortest surface path
    first exceeds ``limit``.

    The shortest surface path over an a x b x M cuboid (a <= b <= M) has
    length sqrt(M**2 + (a + b)**2), so for each M we count the (a, b)
    pairs whose sum gives an integer path length.

    Fixes obfuscation damage in the original: the loop condition referenced
    an undefined ``limit`` (the parameter had been renamed), and the pair
    count used the limit argument where ``max_cuboid_size`` belongs.

    >>> solution(2000)
    100
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # Count pairs (a, b) with a + b == sum_shortest_sides
                # and 1 <= a <= b <= max_cuboid_size.
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


# Keep the obfuscated alias so any existing reference keeps working.
UpperCamelCase_ = solution

if __name__ == "__main__":
    print(f"""{solution() = }""")
| 37 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard transformers lazy-module boilerplate for the UperNet model.
# NOTE(review): obfuscation collapsed every assignment target to
# `UpperCAmelCase`; from their uses, the originals are `_import_structure`
# (referenced on the last line) and the module placeholder in sys.modules.
UpperCAmelCase = {
    "configuration_upernet": ["UperNetConfig"],
}
# Modeling classes are only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
# Static type-checkers see the real imports; at runtime a _LazyModule
# defers importing the heavy submodules until first attribute access.
if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    import sys

    UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    """Scheduler test suite for PNDMScheduler.

    NOTE(review): obfuscation collapsed the class attributes to
    ``lowerCamelCase__``; by SchedulerCommonTest convention these are
    ``scheduler_classes`` and ``forward_default_kwargs`` — confirm against
    the diffusers test base class.
    """
    # Scheduler class(es) under test.
    lowerCamelCase__ = (PNDMScheduler,)
    # Default kwargs forwarded into the step helpers.
    lowerCamelCase__ = (('''num_inference_steps''', 50),)
    def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
        """Return a default PNDM scheduler config, with keyword overrides applied."""
        snake_case__ : List[str] = {
            """num_train_timesteps""": 1_0_0_0,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }

        config.update(**__SCREAMING_SNAKE_CASE )
        return config
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ):
        """Check that a scheduler saved to disk and reloaded via its config
        produces identical step_prk / step_plms outputs, given the same
        dummy sample and past residuals."""
        snake_case__ : List[str] = dict(self.forward_default_kwargs )
        snake_case__ : Union[str, Any] = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : List[str] = self.dummy_sample
        snake_case__ : Any = 0.1 * sample
        # Synthetic history of past model outputs (PNDM is multistep).
        snake_case__ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            snake_case__ : Dict = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
            snake_case__ : Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
            # copy over dummy past residuals
            snake_case__ : str = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__SCREAMING_SNAKE_CASE )
                snake_case__ : Dict = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
                new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                snake_case__ : Any = dummy_past_residuals[:]

            snake_case__ : int = scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            snake_case__ : str = new_scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            snake_case__ : Tuple = scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            snake_case__ : Optional[Any] = new_scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def __UpperCamelCase ( self ):
        # Intentionally a no-op: the corresponding common-test case does not
        # apply to PNDM and is overridden to skip it.
        pass
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ):
        """Variant of the save/reload consistency check where the dummy past
        residuals are installed only after set_timesteps, mirroring the
        order a real sampling loop uses."""
        snake_case__ : List[Any] = dict(self.forward_default_kwargs )
        snake_case__ : str = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : str = self.dummy_sample
        snake_case__ : Optional[Any] = 0.1 * sample
        snake_case__ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            snake_case__ : List[Any] = self.get_scheduler_config()
            snake_case__ : str = scheduler_class(**__SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )

            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ : Dict = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__SCREAMING_SNAKE_CASE )
                snake_case__ : Union[str, Any] = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )

                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ : List[str] = dummy_past_residuals[:]

            snake_case__ : int = scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            snake_case__ : Dict = new_scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

            snake_case__ : int = scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            snake_case__ : int = new_scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
        """Run a complete 10-step PNDM sampling loop (PRK warm-up phase, then
        PLMS phase) on the dummy model and return the final sample."""
        snake_case__ : Any = self.scheduler_classes[0]
        snake_case__ : int = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
        snake_case__ : Dict = scheduler_class(**__SCREAMING_SNAKE_CASE )

        snake_case__ : Optional[int] = 1_0
        snake_case__ : int = self.dummy_model()
        snake_case__ : Any = self.dummy_sample_deter
        scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )

        # Runge–Kutta warm-up steps.
        for i, t in enumerate(scheduler.prk_timesteps ):
            snake_case__ : Dict = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            snake_case__ : List[Any] = scheduler.step_prk(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample

        # Linear multistep phase.
        for i, t in enumerate(scheduler.plms_timesteps ):
            snake_case__ : str = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            snake_case__ : List[Any] = scheduler.step_plms(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample

        return sample
    def __UpperCamelCase ( self ):
        """Both step_prk and step_plms must return samples with the input's
        shape, and consecutive timesteps must agree in shape."""
        snake_case__ : str = dict(self.forward_default_kwargs )
        snake_case__ : str = kwargs.pop("""num_inference_steps""" , __SCREAMING_SNAKE_CASE )

        for scheduler_class in self.scheduler_classes:
            snake_case__ : Optional[Any] = self.get_scheduler_config()
            snake_case__ : str = scheduler_class(**__SCREAMING_SNAKE_CASE )

            snake_case__ : Any = self.dummy_sample
            snake_case__ : Tuple = 0.1 * sample

            if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , """set_timesteps""" ):
                scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
            elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , """set_timesteps""" ):
                snake_case__ : int = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ : Any = dummy_past_residuals[:]

            snake_case__ : Optional[Any] = scheduler.step_prk(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            snake_case__ : Optional[Any] = scheduler.step_prk(__SCREAMING_SNAKE_CASE , 1 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

            snake_case__ : Tuple = scheduler.step_plms(__SCREAMING_SNAKE_CASE , 0 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            snake_case__ : Dict = scheduler.step_plms(__SCREAMING_SNAKE_CASE , 1 , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
def __UpperCamelCase(self):
    """Exercise the scheduler config with several training-timestep counts.

    Fix: the loop variable ``timesteps`` is actually passed through (the
    obfuscated code passed the undefined name ``__SCREAMING_SNAKE_CASE``).
    """
    for timesteps in [100, 1000]:
        self.check_over_configs(num_train_timesteps=timesteps)
def __UpperCamelCase(self):
    """Check `steps_offset` handling, including the exact timestep schedule for offset 1.

    Fix: pass the loop variable instead of the undefined placeholder.
    """
    for steps_offset in [0, 1]:
        self.check_over_configs(steps_offset=steps_offset)
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(steps_offset=1)
    scheduler = scheduler_class(**scheduler_config)
    scheduler.set_timesteps(10)
    # PRK timesteps are interleaved in pairs, hence the duplicated entries.
    assert torch.equal(
        scheduler.timesteps,
        torch.LongTensor(
            [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
        ),
    )
def __UpperCamelCase(self):
    """Exercise several (beta_start, beta_end) pairs.

    Fix: forward the zipped loop variables instead of the undefined placeholder.
    """
    for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
        self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def __UpperCamelCase(self):
    """Exercise both supported beta schedules.

    Fix: forward the loop variable instead of the undefined placeholder.
    """
    for schedule in ["linear", "squaredcos_cap_v2"]:
        self.check_over_configs(beta_schedule=schedule)
def __UpperCamelCase(self):
    """Exercise both supported prediction types.

    Fix: forward the loop variable instead of the undefined placeholder.
    """
    for prediction_type in ["epsilon", "v_prediction"]:
        self.check_over_configs(prediction_type=prediction_type)
def __UpperCamelCase(self):
    """Exercise several individual timesteps through the forward check.

    Fix: forward the loop variable instead of the undefined placeholder.
    """
    for t in [1, 5, 10]:
        self.check_over_forward(time_step=t)
def __UpperCamelCase(self):
    """Exercise several inference-step counts through the forward check.

    Fix: forward ``num_inference_steps`` instead of the undefined placeholder.
    (``t`` is unused here, matching the upstream test's zip signature.)
    """
    for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
        self.check_over_forward(num_inference_steps=num_inference_steps)
def __UpperCamelCase(self):
    """Regression test: set_timesteps once raised when the step count was a power of 3.

    Fix: the undefined placeholders are restored to the intended locals
    (``num_inference_steps``, ``residual``, ``sample``, ``t``).
    """
    # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
    num_inference_steps = 27
    for scheduler_class in self.scheduler_classes:
        sample = self.dummy_sample
        residual = 0.1 * sample
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        # before power of 3 fix, would error on first step, so we only need to do two
        for i, t in enumerate(scheduler.prk_timesteps[:2]):
            sample = scheduler.step_prk(residual, t, sample).prev_sample
def __UpperCamelCase(self):
    """step_plms before any set_timesteps/warm-up must raise.

    Fix: ``assertRaises`` receives an exception class (``ValueError``, per the
    upstream diffusers test) instead of the undefined placeholder.
    """
    with self.assertRaises(ValueError):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
def __UpperCamelCase(self):
    """Full-loop numerical regression for the default (epsilon) config.

    Fix: the undefined placeholder is restored to the local ``sample``.
    """
    sample = self.full_loop()
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 198.1318) < 1e-2
    assert abs(result_mean.item() - 0.2580) < 1e-3
def __UpperCamelCase(self):
    """Full-loop numerical regression for v-prediction.

    Fix: the undefined placeholder is restored to the local ``sample``.
    """
    sample = self.full_loop(prediction_type="v_prediction")
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 67.3986) < 1e-2
    assert abs(result_mean.item() - 0.0878) < 1e-3
def __UpperCamelCase(self):
    """Full-loop regression with ``set_alpha_to_one=True``.

    Fix: the boolean literal is restored (the obfuscated code passed the
    undefined placeholder). ``True`` matches the 230.0399 reference sum from
    the upstream diffusers test — TODO confirm against upstream.
    """
    # We specify different beta, so that the first alpha is 0.99
    sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 230.0399) < 1e-2
    assert abs(result_mean.item() - 0.2995) < 1e-3
def __UpperCamelCase(self):
    """Full-loop regression with ``set_alpha_to_one=False``.

    Fix: the boolean literal is restored (the obfuscated code passed the
    undefined placeholder). ``False`` matches the 186.9482 reference sum from
    the upstream diffusers test — TODO confirm against upstream.
    """
    # We specify different beta, so that the first alpha is 0.99
    sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 186.9482) < 1e-2
    assert abs(result_mean.item() - 0.2434) < 1e-3
| 38 | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class snake_case__ ( _UpperCamelCase ):
    r"""
    Image processor that resizes images to a shortest-edge size, center-crops,
    rescales pixel values and normalizes with ImageNet statistics, and can
    post-process semantic-segmentation logits back into label maps.

    Fixes vs. the obfuscated original:
    - ``__init__`` declared every parameter as ``A__`` (duplicate argument
      names are a SyntaxError) — real parameter names restored.
    - ``get_size_dict(A__, default_to_square=A__)`` passed the size dict as the
      boolean flag; a shortest-edge size must use ``default_to_square=False``.
    - all methods were named ``UpperCAmelCase__`` (each definition shadowing
      the previous one), yet ``preprocess`` calls ``self.resize`` /
      ``self.center_crop`` / ``self.rescale`` / ``self.normalize`` — the real
      method names are restored so those calls resolve.

    Args:
        do_resize: Whether to resize the shortest edge to ``size["shortest_edge"]``.
        size: Target size dict; defaults to ``{"shortest_edge": 256}``.
        resample: Resampling filter used when resizing.
        do_center_crop: Whether to center-crop to ``crop_size``.
        crop_size: Crop size dict; defaults to ``{"height": 224, "width": 224}``.
        do_rescale: Whether to multiply pixel values by ``rescale_factor``.
        rescale_factor: Scale factor applied when rescaling; defaults to ``1/255``.
        do_normalize: Whether to normalize with ``image_mean`` / ``image_std``.
        image_mean: Per-channel mean; defaults to ``IMAGENET_STANDARD_MEAN``.
        image_std: Per-channel std; defaults to ``IMAGENET_STANDARD_STD``.
    """

    # Name under which the pixel tensor is fed to the model.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        # A bare int means "shortest edge", so do not expand it to a square.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` so its shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured resize/crop/rescale/normalize pipeline to ``images``.

        Per-call arguments override the processor's stored defaults.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List] = None):
        """Turn ``outputs.logits`` into per-image segmentation maps.

        When ``target_sizes`` is given, each logits map is bilinearly resized
        to its target (height, width) before the per-pixel argmax.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 666 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Builds tiny Bit configs/inputs and runs shape checks for the Bit test suite.

    Fixes vs. the obfuscated original: the class is renamed to
    ``BitModelTester`` (the sibling test class instantiates exactly that name),
    every method regains its real name (they were all ``snake_case__``, each
    definition shadowing the previous one, while callers invoke
    ``prepare_config_and_inputs`` etc.), and the broken
    ``config_and_inputs`` unpacking — which assigned three values to one name —
    is restored.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        # NOTE: the mutable list defaults mirror the upstream tester; they are
        # only read, never mutated.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny Bit model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the BitConfig matching this tester's dimensions."""
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare BitModel and check the last hidden state shape."""
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Bit downsamples by a total factor of 32.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check feature-map shapes and channel counts of BitBackbone."""
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class snake_case_ ( __A , __A , unittest.TestCase ):
    """Common model-level tests for Bit (model, classification and backbone heads).

    NOTE(review): the obfuscation collapsed every class attribute to
    ``SCREAMING_SNAKE_CASE`` and every method to ``snake_case__`` — within one
    class body each later binding shadows the earlier one, so only the last
    attribute/method of each name survives at runtime. The base classes
    ``__A`` and the value ``_UpperCamelCase`` used below are likewise
    unresolved placeholders (presumably ModelTesterMixin /
    PipelineTesterMixin and False/torch_device respectively) — confirm
    against the upstream transformers test file.
    """

    # Presumably all_model_classes — TODO confirm upstream.
    SCREAMING_SNAKE_CASE : List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    # Presumably pipeline_model_mapping — TODO confirm upstream.
    SCREAMING_SNAKE_CASE : List[str] = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    # Presumably the test_pruning / test_resize_embeddings / ... flags — TODO confirm.
    SCREAMING_SNAKE_CASE : List[str] = False
    SCREAMING_SNAKE_CASE : Any = False
    SCREAMING_SNAKE_CASE : List[str] = False
    SCREAMING_SNAKE_CASE : Tuple = False
    SCREAMING_SNAKE_CASE : Dict = False

    def snake_case__( self : Union[str, Any] ) ->List[Any]:
        # setUp: build the shared model tester and config tester.
        # NOTE(review): both results are bound to the same obfuscated name, so
        # the tester instance is discarded; and ``_UpperCamelCase`` is undefined
        # (upstream passes has_text_modality=False).
        snake_case_ = BitModelTester(self )
        snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )

    def snake_case__( self : List[Any] ) ->Dict:
        # Run the standard ConfigTester battery.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def snake_case__( self : Any ) ->Union[str, Any]:
        # create_and_test_config_common_properties is a deliberate no-op for Bit.
        return

    @unittest.skip(reason='''Bit does not output attentions''' )
    def snake_case__( self : int ) ->Tuple:
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def snake_case__( self : str ) ->Optional[Any]:
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def snake_case__( self : Union[str, Any] ) ->Optional[Any]:
        pass

    def snake_case__( self : Optional[Any] ) ->Optional[int]:
        # Check that every model's forward signature starts with ``pixel_values``.
        # NOTE(review): the duplicated target name discards ``config`` from the
        # (config, inputs_dict) pair, and later references (``model``,
        # ``signature``, ``arg_names``) name locals the obfuscation erased.
        snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ = model_class(_UpperCamelCase )
            snake_case_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ = [*signature.parameters.keys()]
            snake_case_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _UpperCamelCase )

    def snake_case__( self : Tuple ) ->str:
        # Shape checks for the bare model.
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase )

    def snake_case__( self : str ) ->Optional[Any]:
        # Shape checks for the backbone.
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*_UpperCamelCase )

    def snake_case__( self : Tuple ) ->int:
        # Weight-initialization check: norm layers must start at weight=1, bias=0.
        # NOTE(review): ``nn.BatchNormad`` is a mangled ``nn.BatchNorm2d`` — this
        # attribute does not exist and would raise at runtime.
        snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ = model_class(config=_UpperCamelCase )
            for name, module in model.named_modules():
                if isinstance(_UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    def snake_case__( self : Union[str, Any] ) ->str:
        # Hidden-states output check for both Bit layer types.
        def check_hidden_states_output(_UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] ):
            # NOTE(review): all three parameters share one placeholder name, so
            # only the last argument is visible inside; upstream these are
            # (inputs_dict, config, model_class).
            snake_case_ = model_class(_UpperCamelCase )
            model.to(_UpperCamelCase )
            model.eval()
            with torch.no_grad():
                snake_case_ = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
            snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            snake_case_ = self.model_tester.num_stages
            self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                snake_case_ = layer_type
                snake_case_ = True
                check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                snake_case_ = True
                check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

    @unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def snake_case__( self : str ) ->Optional[Any]:
        pass

    def snake_case__( self : Dict ) ->Dict:
        # Shape checks for the image-classification head.
        snake_case_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )

    @slow
    def snake_case__( self : Optional[int] ) ->Any:
        # Smoke test: the first published checkpoint must load.
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ = BitModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def prepare_img():
    """Load the standard COCO sample image used by the integration tests.

    Fixes vs. the obfuscated original: the function is renamed to
    ``prepare_img`` (that is the name the integration test calls), and the
    opened image is bound to the ``image`` local that the original returned
    without ever defining (NameError).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
    """Slow integration test: run the pretrained Bit classifier on a COCO image
    and compare the first logits against reference values.

    NOTE(review): both methods carry the obfuscated name ``snake_case__``, so
    the second shadows the first and the ``self.default_image_processor``
    lookup below cannot resolve; ``_UpperCamelCase`` is an unresolved
    placeholder (presumably ``torch_device``). Confirm against the upstream
    transformers test file.
    """

    @cached_property
    def snake_case__( self : Optional[Any] ) ->int:
        # Presumably ``default_image_processor``: the processor matching the
        # first published checkpoint, or None when vision deps are missing.
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def snake_case__( self : str ) ->Optional[int]:
        # End-to-end check of BitForImageClassification on a real image.
        # NOTE(review): every result is bound to the same obfuscated local, so
        # the later references (``image_processor``, ``image``, ``inputs``,
        # ``outputs``, ``expected_slice``) name variables the obfuscation erased.
        snake_case_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_UpperCamelCase )
        snake_case_ = self.default_image_processor
        snake_case_ = prepare_img()
        snake_case_ = image_processor(images=_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase )
        # forward pass
        with torch.no_grad():
            snake_case_ = model(**_UpperCamelCase )
        # verify the logits
        snake_case_ = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , _UpperCamelCase )
        snake_case_ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@require_torch
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = (BitBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = BitConfig
SCREAMING_SNAKE_CASE : Optional[int] = False
def snake_case__( self : Optional[Any] ) ->Tuple:
snake_case_ = BitModelTester(self ) | 39 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.