code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase_ ( lowerCamelCase ):
    # JSON dataset reader: wires file path(s) into the packaged `Json`
    # builder and materializes a streaming or map-style `Dataset`.
    # NOTE(review): identifiers are machine-mangled — `__init__`'s repeated
    # `__lowerCAmelCase` parameters are a SyntaxError as written, and the
    # body reads `field` / `path_or_paths`, which are never bound here;
    # confirm against the original `JsonDatasetReader` source.
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Store reader options and construct the underlying `Json` builder."""
        super().__init__(
            __lowerCAmelCase , split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , )
        # `field` selects a nested key inside each JSON document.
        __magic_name__ :Optional[int] = field
        # Normalize a bare path into a {split: path(s)} mapping.
        __magic_name__ :List[Any] = path_or_paths if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else {self.split: path_or_paths}
        __magic_name__ :Optional[int] = Json(
            cache_dir=__lowerCAmelCase , data_files=__lowerCAmelCase , features=__lowerCAmelCase , field=__lowerCAmelCase , **__lowerCAmelCase , )
    def A ( self ):
        """Build and return the dataset (iterable when streaming, else map-style)."""
        # Build iterable dataset
        if self.streaming:
            __magic_name__ :Dict = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            # Download/verification knobs are left as None (library defaults).
            __magic_name__ :int = None
            __magic_name__ :Optional[Any] = None
            __magic_name__ :int = None
            __magic_name__ :str = None
            self.builder.download_and_prepare(
                download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , )
            __magic_name__ :Union[str, Any] = self.builder.as_dataset(
                split=self.split , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory )
        return dataset
class lowerCamelCase_ :
    # JSON dataset writer: serializes a `Dataset` to JSON (record-lines or
    # standard), optionally compressed, batched, and parallelized across
    # `num_proc` worker processes.
    # NOTE(review): identifiers are machine-mangled — `__init__`'s repeated
    # `__lowerCAmelCase` parameters are a SyntaxError as written; the bodies
    # read `dataset`, `path_or_buf`, `batch_size`, `num_proc`,
    # `to_json_kwargs`, `orient`, `lines`, `index`, `compression`, which the
    # original code presumably bound.
    def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
        """Validate options and store writer state (dataset, target, batching)."""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
        __magic_name__ :Tuple = dataset
        __magic_name__ :Tuple = path_or_buf
        # Fall back to the library-wide default batch size when none is given.
        __magic_name__ :Tuple = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        __magic_name__ :Dict = num_proc
        __magic_name__ :Dict = '''utf-8'''
        __magic_name__ :List[Any] = to_json_kwargs
    def A ( self ):
        """Write the dataset to `path_or_buf`; return the number of bytes written."""
        # `orient="records"` implies line-delimited JSON unless overridden.
        __magic_name__ :List[Any] = self.to_json_kwargs.pop('''path_or_buf''' , __lowerCAmelCase )
        __magic_name__ :Any = self.to_json_kwargs.pop('''orient''' , '''records''' )
        __magic_name__ :Dict = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        __magic_name__ :Union[str, Any] = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        __magic_name__ :Union[str, Any] = self.to_json_kwargs.pop('''compression''' , __lowerCAmelCase )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
        # Path-like target: let fsspec handle opening + optional compression.
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , '''wb''' , compression=__lowerCAmelCase ) as buffer:
                __magic_name__ :Optional[Any] = self._write(file_obj=__lowerCAmelCase , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs )
        else:
            # Already-open buffers cannot be wrapped in a compressor here.
            if compression:
                raise NotImplementedError(
                    F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ''' was passed. Please provide a local path instead.''' )
            __magic_name__ :int = self._write(
                file_obj=self.path_or_buf , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **self.to_json_kwargs )
        return written
    def A ( self , __lowerCAmelCase ):
        """Encode one batch of rows as JSON bytes (args packed in a tuple for imap)."""
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ :Tuple = args
        # Slice the Arrow table for this batch, honoring any indices mapping.
        __magic_name__ :int = query_table(
            table=self.dataset.data , key=slice(__lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
        __magic_name__ :List[Any] = batch.to_pandas().to_json(
            path_or_buf=__lowerCAmelCase , orient=__lowerCAmelCase , lines=__lowerCAmelCase , index=__lowerCAmelCase , **__lowerCAmelCase )
        # Keep batches newline-terminated so concatenation stays valid JSONL.
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase , ):
        """Stream encoded batches into `file_obj`, serially or via a process pool."""
        __magic_name__ :Tuple = 0
        if self.num_proc is None or self.num_proc == 1:
            # Serial path: encode and write one batch at a time.
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                __magic_name__ :Union[str, Any] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(__lowerCAmelCase )
        else:
            # Parallel path: `imap` preserves batch order while workers encode.
            __magic_name__ , __magic_name__ :List[Any] = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __lowerCAmelCase , __lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(__lowerCAmelCase )
        return written
| 0 |
'''simple docstring'''
from torch import nn
def snake_case_ ( act_fn ):
    """Map an activation-function name to an instantiated ``torch.nn`` module.

    Args:
        act_fn: one of ``"swish"``, ``"silu"``, ``"mish"`` or ``"gelu"``.

    Returns:
        nn.Module: the corresponding activation module.

    Raises:
        ValueError: if ``act_fn`` is not a supported name.
    """
    # Fix: the body read `act_fn` while the parameter was named
    # `SCREAMING_SNAKE_CASE__`, so every call raised NameError.
    # "swish" is the historical name for SiLU; both map to nn.SiLU.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 672 | 0 |
import numpy as np
from PIL import Image
def _A ( _lowercase , _lowercase , _lowercase ) -> np.ndarray:
"""simple docstring"""
__UpperCamelCase = np.array(_lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
# compute the shape of the output matrix
__UpperCamelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__UpperCamelCase = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__UpperCamelCase = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__UpperCamelCase = 0
__UpperCamelCase = 0
return updated_arr
def _A ( _lowercase , _lowercase , _lowercase ) -> np.ndarray:
"""simple docstring"""
__UpperCamelCase = np.array(_lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
# compute the shape of the output matrix
__UpperCamelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__UpperCamelCase = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__UpperCamelCase = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__UpperCamelCase = 0
__UpperCamelCase = 0
return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod
    testmod(name='''avgpooling''', verbose=True)
    # Loading the image
    # NOTE(review): the opened image is bound to `__snake_case`, yet the
    # calls below read `image`, and `maxpooling` / `avgpooling` do not
    # exist (both pooling functions above are named `_A`); this demo block
    # raises NameError as written.
    __snake_case = Image.open('''path_to_image''')
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# NOTE(review): every constant below is bound to the same mangled name
# `__magic_name__`, so only the last assignment survives; the classes
# below read `HEURISTIC`, `grid` and `delta` — the names the original
# presumably used for the heuristic switch, the obstacle map, and the
# four movement directions.
__magic_name__ : Tuple = 0
__magic_name__ : Dict = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
__magic_name__ : str = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
# Type alias for a (row, column) grid position.
__magic_name__ : Dict = tuple[int, int]
class __SCREAMING_SNAKE_CASE :
    """A* search node: a grid position plus path cost and heuristic estimate.

    Fixes over the mangled original: `__init__` declared six parameters
    sharing one name (a SyntaxError), no attribute was ever bound, and the
    heuristic method was defined as `UpperCamelCase` while being invoked as
    `self.calculate_heuristic()`.
    """

    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        """Store position, goal, path cost and parent; precompute h/f costs.

        Args:
            pos_x / pos_y: this node's column / row on the grid.
            goal_x / goal_y: the target cell's column / row.
            g_cost: cost of the path from the start to this node.
            parent: predecessor node on that path (None for the start).
        """
        self.pos_x = pos_x
        self.pos_y = pos_y
        # `pos` is (row, column) — all coordinates in this file are [y, x].
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic( self ):
        """Estimate remaining distance to the goal.

        Uses Manhattan distance when the module-level HEURISTIC flag is 1,
        otherwise Euclidean distance.
        """
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )

    def __lt__( self , other ):
        # Order nodes by total estimated cost so list.sort() is best-first.
        return self.f_cost < other.f_cost
class __SCREAMING_SNAKE_CASE :
    '''
    One-directional A* search over the module-level `grid` using `delta`
    moves and the Node class above.

    NOTE(review): identifiers are machine-mangled — `__init__`'s two
    parameters share one name (a SyntaxError as written), every local is
    `_snake_case` (so `current_node`, `successors`, `pos_x`, ... are never
    bound), and the three `UpperCamelCase` methods are invoked elsewhere as
    `search` / `get_successors` / `retrace_path`; confirm against the
    original source before relying on this block.
    '''
    def __init__( self , lowerCamelCase , lowerCamelCase ):
        # Start node with g_cost 0; target node with a large sentinel cost.
        # Coordinates arrive as (y, x) tuples, hence the [1]/[0] swap.
        _snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase )
        _snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowerCamelCase )
        _snake_case = [self.start]
        _snake_case = []
        _snake_case = False
    def UpperCamelCase( self ):
        """Pop the best open node until the target is reached; return the path."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            _snake_case = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(lowerCamelCase )
            self.closed_nodes.append(lowerCamelCase )
            _snake_case = self.get_successors(lowerCamelCase )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(lowerCamelCase )
                else:
                    # retrieve the best current path
                    _snake_case = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(lowerCamelCase )
                    else:
                        self.open_nodes.append(lowerCamelCase )
        # Goal unreachable: fall back to a path containing only the start.
        return [self.start.pos]
    def UpperCamelCase( self , lowerCamelCase ):
        """Return in-bounds, non-obstacle neighbour nodes of the given parent."""
        _snake_case = []
        for action in delta:
            _snake_case = parent.pos_x + action[1]
            _snake_case = parent.pos_y + action[0]
            # Skip coordinates outside the grid.
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase ) - 1):
                continue
            # Skip obstacle cells (non-zero entries).
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    lowerCamelCase , lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase , ) )
        return successors
    def UpperCamelCase( self , lowerCamelCase ):
        """Walk parent links back to the start; return the start-to-node path."""
        _snake_case = node
        _snake_case = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            _snake_case = current_node.parent
        path.reverse()
        return path
class __SCREAMING_SNAKE_CASE :
    '''
    Bidirectional A*: runs one AStar from the start and one from the goal,
    alternating expansions until the frontiers meet, then stitches the two
    half-paths together.

    NOTE(review): same machine-mangling as the classes above — duplicate
    `__init__` parameter names (SyntaxError) and locals collapsed to
    `_snake_case`, so `current_fwd_node`, `current_bwd_node`, `successors`,
    `fwd_path`, `bwd_path`, ... are never actually bound.
    '''
    def __init__( self , lowerCamelCase , lowerCamelCase ):
        # Forward search (start -> goal) and backward search (goal -> start).
        _snake_case = AStar(lowerCamelCase , lowerCamelCase )
        _snake_case = AStar(lowerCamelCase , lowerCamelCase )
        _snake_case = False
    def UpperCamelCase( self ):
        """Alternate best-first expansions of both frontiers until they meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            _snake_case = self.fwd_astar.open_nodes.pop(0 )
            _snake_case = self.bwd_astar.open_nodes.pop(0 )
            # The frontiers meet: join the two half-paths.
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    lowerCamelCase , lowerCamelCase )
            self.fwd_astar.closed_nodes.append(lowerCamelCase )
            self.bwd_astar.closed_nodes.append(lowerCamelCase )
            # Each search targets the other search's current node.
            _snake_case = current_bwd_node
            _snake_case = current_fwd_node
            _snake_case = {
                self.fwd_astar: self.fwd_astar.get_successors(lowerCamelCase ),
                self.bwd_astar: self.bwd_astar.get_successors(lowerCamelCase ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(lowerCamelCase )
                    else:
                        # retrieve the best current path
                        _snake_case = astar.open_nodes.pop(
                            astar.open_nodes.index(lowerCamelCase ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(lowerCamelCase )
                        else:
                            astar.open_nodes.append(lowerCamelCase )
        # No meeting point found: fall back to the start position only.
        return [self.fwd_astar.start.pos]
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase ):
        """Concatenate the forward path with the reversed backward path."""
        _snake_case = self.fwd_astar.retrace_path(lowerCamelCase )
        _snake_case = self.bwd_astar.retrace_path(lowerCamelCase )
        # Drop the duplicated meeting node before reversing.
        bwd_path.pop()
        bwd_path.reverse()
        _snake_case = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # NOTE(review): `init`, `goal`, timers and search results below are all
    # bound to the mangled name `__magic_name__`, while the reads use the
    # intended names (`init`, `goal`, `start_time`, `end_time`, ...), and
    # `AStar` / `BidirectionalAStar` / `grid` are not defined under those
    # names in this file — this demo raises NameError as written.
    __magic_name__ : Optional[int] = (0, 0)
    __magic_name__ : Any = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    __magic_name__ : Any = time.time()
    __magic_name__ : Optional[int] = AStar(init, goal)
    __magic_name__ : str = a_star.search()
    __magic_name__ : List[Any] = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')
    __magic_name__ : List[str] = time.time()
    __magic_name__ : Optional[Any] = BidirectionalAStar(init, goal)
    __magic_name__ : Optional[int] = time.time() - bd_start_time
    print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 672 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCamelCase__ ( _A):
    """
    Value-guided planning pipeline: denoises candidate trajectories with a
    diffusion UNet while nudging every step along the gradient of a learned
    value function, then returns the first action of the highest-value
    trajectory.

    NOTE(review): identifiers are machine-mangled — the base class and every
    local are `_A`, so no attribute (`value_function`, `unet`, `scheduler`,
    `env`, `means`, `stds`, ...) is ever actually bound; the reads in the
    method bodies show the intended names. Confirm against the original
    `ValueGuidedRLPipeline` source.
    """
    def __init__( self : List[str] , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : UNetaDModel , __lowerCAmelCase : DDPMScheduler , __lowerCAmelCase : Any , ) -> Any:
        """Store models/env and precompute per-key dataset mean/std stats."""
        super().__init__()
        _A = value_function
        _A = unet
        _A = scheduler
        _A = env
        _A = env.get_dataset()
        _A = {}
        for key in self.data.keys():
            try:
                _A = self.data[key].mean()
            except: # noqa: E722
                # Non-numeric dataset entries have no mean; skip them.
                pass
        _A = {}
        for key in self.data.keys():
            try:
                _A = self.data[key].std()
            except: # noqa: E722
                pass
        _A = env.observation_space.shape[0]
        _A = env.action_space.shape[0]
    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int ) -> Any:
        # Normalize `x_in` with the dataset statistics stored under `key`.
        return (x_in - self.means[key]) / self.stds[key]
    def snake_case_ ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ) -> Any:
        # Inverse of the normalization above.
        return x_in * self.stds[key] + self.means[key]
    def snake_case_ ( self : List[str] , __lowerCAmelCase : Optional[Any] ) -> List[str]:
        # Recursively move dicts/arrays/tensors onto the UNet's device.
        if type(__lowerCAmelCase ) is dict:
            return {k: self.to_torch(__lowerCAmelCase ) for k, v in x_in.items()}
        elif torch.is_tensor(__lowerCAmelCase ):
            return x_in.to(self.unet.device )
        return torch.tensor(__lowerCAmelCase , device=self.unet.device )
    def snake_case_ ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ) -> List[Any]:
        # Re-impose conditioning values (e.g. the initial state) on `x_in`.
        for key, val in cond.items():
            _A = val.clone()
        return x_in
    def snake_case_ ( self : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ) -> Tuple:
        """Denoise `x` over all scheduler timesteps with value-gradient guidance."""
        _A = x.shape[0]
        _A = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            _A = torch.full((batch_size,) , __lowerCAmelCase , device=self.unet.device , dtype=torch.long )
            for _ in range(__lowerCAmelCase ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    _A = self.value_function(x.permute(0 , 2 , 1 ) , __lowerCAmelCase ).sample
                    _A = torch.autograd.grad([y.sum()] , [x] )[0]
                    # Scale the gradient by the posterior std of this step.
                    _A = self.scheduler._get_variance(__lowerCAmelCase )
                    _A = torch.exp(0.5 * posterior_variance )
                    _A = model_std * grad
                _A = 0
                _A = x.detach()
                _A = x + scale * grad
                _A = self.reset_xa(__lowerCAmelCase , __lowerCAmelCase , self.action_dim )
            _A = self.unet(x.permute(0 , 2 , 1 ) , __lowerCAmelCase ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            _A = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , predict_epsilon=__lowerCAmelCase )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            _A = self.reset_xa(__lowerCAmelCase , __lowerCAmelCase , self.action_dim )
            _A = self.to_torch(__lowerCAmelCase )
        return x, y
    def __call__( self : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=64 , __lowerCAmelCase : Union[str, Any]=32 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Any=0.1 ) -> List[str]:
        """Plan from an observation; return the chosen (de-normalized) action."""
        # normalize the observations and create batch dimension
        _A = self.normalize(__lowerCAmelCase , '''observations''' )
        _A = obs[None].repeat(__lowerCAmelCase , axis=0 )
        # Condition index 0 pins the trajectory's first state to `obs`.
        _A = {0: self.to_torch(__lowerCAmelCase )}
        _A = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        _A = randn_tensor(__lowerCAmelCase , device=self.unet.device )
        _A = self.reset_xa(__lowerCAmelCase , __lowerCAmelCase , self.action_dim )
        _A = self.to_torch(__lowerCAmelCase )
        # run the diffusion process
        _A , _A = self.run_diffusion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        # sort output trajectories by value
        _A = y.argsort(0 , descending=__lowerCAmelCase ).squeeze()
        _A = x[sorted_idx]
        _A = sorted_values[:, :, : self.action_dim]
        _A = actions.detach().cpu().numpy()
        _A = self.de_normalize(__lowerCAmelCase , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            _A = 0
        else:
            # if we didn't run value guiding, select a random action
            _A = np.random.randint(0 , __lowerCAmelCase )
        _A = denorm_actions[selected_index, 0]
        return denorm_actions
| 2 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the Nezha model (standard transformers
# `_LazyModule` pattern).
# NOTE(review): the import-structure dicts/lists are bound to the mangled
# name `__magic_name__`, yet L at the bottom `_LazyModule(...)` is passed
# `_import_structure`, which is never defined — and the lazy module is
# assigned to a throwaway name instead of `sys.modules[__name__]`; this
# block raises NameError as written.
__magic_name__ : int = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
# Register the torch-only symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ : List[Any] = [
        """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NezhaForNextSentencePrediction""",
        """NezhaForMaskedLM""",
        """NezhaForPreTraining""",
        """NezhaForMultipleChoice""",
        """NezhaForQuestionAnswering""",
        """NezhaForSequenceClassification""",
        """NezhaForTokenClassification""",
        """NezhaModel""",
        """NezhaPreTrainedModel""",
    ]
# Static type checkers get the real imports; at runtime the lazy module
# defers them until first attribute access.
if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys
    __magic_name__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the GIT model (standard transformers
# `_LazyModule` pattern).
# NOTE(review): the import-structure containers are bound to the mangled
# name `lowerCAmelCase`, yet `_LazyModule(...)` at the bottom is passed
# `_import_structure`, which is never defined — and the lazy module is
# assigned to a throwaway name instead of `sys.modules[__name__]`; this
# block raises NameError as written.
lowerCAmelCase : Union[str, Any] = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}
# Register the torch-only symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : List[Any] = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]
# Static type checkers get the real imports; at runtime the lazy module
# defers them until first attribute access.
if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys
    lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 |
'''simple docstring'''
import string
def snake_case_ ( sequence ):
    """Encode/decode ``sequence`` with the Atbash cipher, character by character.

    Letters map to their mirror in the alphabet (A<->Z, b<->y, ...);
    every other character passes through unchanged.
    """
    # Fix: the body iterated the unbound name `sequence` (the parameter was
    # `SCREAMING_SNAKE_CASE__`), read the unbound `extract`, and never
    # accumulated into `output`.
    output = ""
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            # Uppercase: 'A' (65) <-> 'Z' (90); 155 = 65 + 90.
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            # Lowercase: 'a' (97) <-> 'z' (122); 219 = 97 + 122.
            output += chr(219 - extract )
        else:
            output += i
    return output
def snake_case_ ( sequence ):
    """Atbash cipher via a precomputed reversed alphabet lookup.

    Faster variant of the per-character arithmetic version: indexes each
    letter into `string.ascii_letters` and reads the mirrored letter from
    the reversed alphabets; non-letters pass through unchanged.
    """
    # Fix: the original bound both tables to `_snake_case` and took its
    # argument as `SCREAMING_SNAKE_CASE__`, so `letters`, `letters_reversed`
    # and `sequence` were all unbound at call time.
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def snake_case_ ( ):
    """Print timing comparisons of atbash_slow() vs atbash() over string.printable.

    Both benchmarked functions are imported from this module (`__main__`)
    inside the timeit setup string, so this only works when run as a script
    that actually defines `atbash` and `atbash_slow`.
    """
    from timeit import timeit

    # Fix: the setup string was bound to `_snake_case` while `timeit` was
    # passed the unbound name `SCREAMING_SNAKE_CASE__`.
    print("Running performance benchmarks..." )
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds''' )
    print(f'''> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds''' )
if __name__ == "__main__":
    # NOTE(review): `atbash` and `benchmark` are not defined in this file —
    # all three functions above are named `snake_case_` — so this demo
    # raises NameError as written.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F'{example} encrypted in atbash: {atbash(example)}')
    benchmark()
| 672 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class a :
    """
    Model tester that builds a tiny TFBlenderbotSmall config plus random
    inputs and checks decoder behaviour with cached past key values.

    NOTE(review): identifiers are machine-mangled — `__init__`'s parameters
    all share the name `_snake_case` (a SyntaxError as written), locals are
    bound to `lowerCAmelCase`, and the test class below instantiates
    `TFBlenderbotSmallModelTester`, the name this class presumably had.
    """
    # Config class under test, extra config overrides, and hidden activation.
    snake_case__ = BlenderbotSmallConfig
    snake_case__ = {}
    snake_case__ = '''gelu'''
    def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=False , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=20 , _snake_case=2 , _snake_case=1 , _snake_case=0 , ):
        """Store the (tiny) model hyper-parameters used to build test configs."""
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = eos_token_id
        lowerCAmelCase = pad_token_id
        lowerCAmelCase = bos_token_id
    def UpperCamelCase__ ( self ):
        """Build a random (config, inputs_dict) pair for seq2seq testing."""
        # Random input ids with an EOS token appended to every row.
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        lowerCAmelCase = prepare_blenderbot_small_inputs_dict(_snake_case , _snake_case , _snake_case )
        return config, inputs_dict
    def UpperCamelCase__ ( self , _snake_case , _snake_case ):
        """Check that cached past_key_values reproduce the no-cache outputs."""
        lowerCAmelCase = TFBlenderbotSmallModel(config=_snake_case ).get_decoder()
        lowerCAmelCase = inputs_dict['input_ids']
        # Use a single example to keep the comparison cheap.
        lowerCAmelCase = input_ids[:1, :]
        lowerCAmelCase = inputs_dict['attention_mask'][:1, :]
        lowerCAmelCase = inputs_dict['head_mask']
        lowerCAmelCase = 1
        # first forward pass
        lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , head_mask=_snake_case , use_cache=_snake_case )
        lowerCAmelCase ,lowerCAmelCase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
        lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )[0]
        lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
        lowerCAmelCase = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 )
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : str=None , ):
    # Build the full dict of BlenderbotSmall model inputs, filling in any
    # mask that was not supplied (padding-aware attention masks, all-ones
    # head masks).
    # NOTE(review): the eight parameters share one mangled name — a
    # SyntaxError as written; the body reads `config`, `input_ids`,
    # `decoder_input_ids`, `attention_mask`, `decoder_attention_mask`,
    # `head_mask`, `decoder_head_mask`, `cross_attn_head_mask`.
    if attention_mask is None:
        # Mask out padding tokens in the encoder input.
        lowerCAmelCase = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # Always attend to the first decoder token; mask padding elsewhere.
        lowerCAmelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class a ( a__ , a__ , unittest.TestCase ):
    """
    Unit-test suite for TFBlenderbotSmall (model + pipeline mixins).

    NOTE(review): `setUp` instantiates `TFBlenderbotSmallModelTester`, but
    the tester class above is mangled to `a`; confirm against the original
    test module.
    """
    # Model classes exercised by the common test mixins.
    snake_case__ = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    snake_case__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    # Pipeline-tag -> model-class mapping for the pipeline test mixin.
    snake_case__ = (
        {
            '''conversational''': TFBlenderbotSmallForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotSmallModel,
            '''summarization''': TFBlenderbotSmallForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
            '''translation''': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # Mixin feature flags (generative; no head masking / ONNX tests).
    snake_case__ = True
    snake_case__ = False
    snake_case__ = False
    def UpperCamelCase__ ( self ):
        """Create the model tester and the config tester."""
        lowerCAmelCase = TFBlenderbotSmallModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=_snake_case )
    def UpperCamelCase__ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def UpperCamelCase__ ( self ):
        """Check decoder consistency with cached past key values."""
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class a ( unittest.TestCase ):
    """
    Slow integration test: generate a reply with the pretrained
    facebook/blenderbot_small-90M checkpoint and compare against known
    outputs.
    """
    # Single-turn conversation used as the generation prompt.
    snake_case__ = [
        '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
        ''' i\'m going to throw up.\nand why is that?'''
    ]
    snake_case__ = '''facebook/blenderbot_small-90M'''
    @cached_property
    def UpperCamelCase__ ( self ):
        """Lazily load the BlenderbotSmall tokenizer (90M checkpoint)."""
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
    @cached_property
    def UpperCamelCase__ ( self ):
        """Lazily load the pretrained seq2seq model."""
        lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def UpperCamelCase__ ( self ):
        """Generate with beam search and assert the reply is one of the known outputs."""
        lowerCAmelCase = self.tokenizer(self.src_text , return_tensors='tf' )
        lowerCAmelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_snake_case , )
        lowerCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_snake_case )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 4 |
'''simple docstring'''
import numpy as np
def snake_case_ ( vector ):
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise to ``vector``.

    Works on scalars and numpy arrays alike (broadcasting via np.exp).
    """
    # Fix: the body read `vector` while the parameter was named
    # `SCREAMING_SNAKE_CASE__`, so every call raised NameError.
    return 1 / (1 + np.exp(-vector ))
def snake_case_ ( vector ):
    """Sigmoid-approximated GELU: x * sigmoid(1.702 * x), elementwise.

    Reference: Hendrycks & Gimpel, "Gaussian Error Linear Units (GELUs)".
    """
    # Fixes: the parameter was `SCREAMING_SNAKE_CASE__` while the body read
    # `vector`, and it called `sigmoid`, a name no surviving definition in
    # this file provides (the sigmoid above is also named `snake_case_`, so
    # this definition shadows it) — the sigmoid is therefore inlined here.
    return vector * (1 / (1 + np.exp(-1.702 * vector )))
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
| 672 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
    '''
    Output of a Karras-VE scheduler step.

    NOTE(review): all three fields share the mangled name `_lowercase`, so
    only the last annotation survives; the scheduler below constructs this
    with `prev_sample`, `derivative` and `pred_original_sample` keywords —
    the field names the original presumably declared.
    '''
    # prev_sample: sample for the previous (less noisy) timestep
    _lowercase : torch.FloatTensor
    # derivative: d(sample)/d(sigma), reused by the Heun correction step
    _lowercase : torch.FloatTensor
    # pred_original_sample: current estimate of the fully denoised sample
    _lowercase : Optional[torch.FloatTensor] = None
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
    '''
    Variance-exploding scheduler in the style of Karras et al. (2022):
    stochastic "churn" noise injection followed by an Euler step and an
    optional 2nd-order (Heun) correction.

    NOTE(review): identifiers are machine-mangled — both base classes share
    one name, all `__init__` parameters are `_lowercase` (a SyntaxError as
    written), and every local is `_lowerCAmelCase`, so the attributes the
    method bodies read (`num_inference_steps`, `timesteps`, `schedule`,
    `gamma`, `eps`, `sigma_hat`, ...) are never actually bound.
    '''
    # Compatibility/order marker (project-defined semantics).
    _lowercase : Optional[int] = 2
    @register_to_config
    def __init__( self , _lowercase = 0.02 , _lowercase = 100 , _lowercase = 1.007 , _lowercase = 80 , _lowercase = 0.05 , _lowercase = 50 , ):
        """Store the sigma schedule hyper-parameters (sigma_min/max, churn, noise)."""
        _lowerCAmelCase = sigma_max
        # setable values
        _lowerCAmelCase = None
        _lowerCAmelCase = None
        _lowerCAmelCase = None # sigma(t_i)
    def _lowercase ( self , _lowercase , _lowercase = None ):
        """Identity — this scheduler needs no input scaling."""
        return sample
    def _lowercase ( self , _lowercase , _lowercase = None ):
        """Precompute the descending timesteps and matching sigma schedule."""
        _lowerCAmelCase = num_inference_steps
        _lowerCAmelCase = np.arange(0 , self.num_inference_steps )[::-1].copy()
        _lowerCAmelCase = torch.from_numpy(_lowercase ).to(_lowercase )
        # Geometric interpolation from sigma_max^2 down to sigma_min^2.
        _lowerCAmelCase = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        _lowerCAmelCase = torch.tensor(_lowercase , dtype=torch.floataa , device=_lowercase )
    def _lowercase ( self , _lowercase , _lowercase , _lowercase = None ):
        """Inject churn noise; return the noised sample and its raised sigma."""
        # Churn only inside the configured [s_min, s_max] sigma band.
        if self.config.s_min <= sigma <= self.config.s_max:
            _lowerCAmelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            _lowerCAmelCase = 0
        # sample eps ~ N(0, S_noise^2 * I)
        _lowerCAmelCase = self.config.s_noise * randn_tensor(sample.shape , generator=_lowercase ).to(sample.device )
        _lowerCAmelCase = sigma + gamma * sigma
        _lowerCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = True , ):
        """Euler step from sigma_hat down to sigma_prev."""
        _lowerCAmelCase = sample_hat + sigma_hat * model_output
        _lowerCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
        _lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=_lowercase , derivative=_lowercase , pred_original_sample=_lowercase )
    def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = True , ):
        """Heun second-order correction: average the two slope estimates."""
        _lowerCAmelCase = sample_prev + sigma_prev * model_output
        _lowerCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
        _lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=_lowercase , derivative=_lowercase , pred_original_sample=_lowercase )
    def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
        """Training-time noise addition is not supported by this scheduler."""
        raise NotImplementedError()
| 5 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
        },
    ] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # Single-node SageMaker training smoke test, parameterized over the
    # PyTorch and TensorFlow example scripts; gated behind TEST_SAGEMAKER.
    # NOTE(review): identifiers look machine-mangled — all four methods share
    # the name ``UpperCamelCase`` (later defs shadow earlier ones), keyword
    # values like ``check=lowerCamelCase`` reference a name not defined in
    # this scope, and results bound to ``_snake_case`` are read back via the
    # original names (``estimator``, ``result_metrics_df``…). Confirm
    # against the original test module.
    '''simple docstring'''

    def UpperCamelCase( self ):
        # Copy the example training script into the test workspace (PyTorch only).
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=lowerCamelCase , )
        assert hasattr(self , "env" )

    def UpperCamelCase( self , lowerCamelCase=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )

    def UpperCamelCase( self , lowerCamelCase ):
        # Export the training job's CloudWatch metrics to a CSV file.
        # NOTE(review): ``job_name`` is not bound here — presumably meant to
        # be the parameter; verify.
        TrainingJobAnalytics(lowerCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )

    def UpperCamelCase( self ):
        # create estimator
        _snake_case = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        _snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _snake_case = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowerCamelCase )
| 672 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase = logging.get_logger(__name__)
# NOTE(review): every OrderedDict below is bound to the same mangled name
# ``_lowerCamelCase``, so each assignment overwrites the previous one.
# These were presumably distinct FLAX_MODEL_*_MAPPING_NAMES constants
# (model-type -> Flax class name tables used by the lazy auto mappings
# further down); verify against the original auto-model module.
_lowerCamelCase = OrderedDict(
    [
        # Base model mapping
        ('albert', 'FlaxAlbertModel'),
        ('bart', 'FlaxBartModel'),
        ('beit', 'FlaxBeitModel'),
        ('bert', 'FlaxBertModel'),
        ('big_bird', 'FlaxBigBirdModel'),
        ('blenderbot', 'FlaxBlenderbotModel'),
        ('blenderbot-small', 'FlaxBlenderbotSmallModel'),
        ('clip', 'FlaxCLIPModel'),
        ('distilbert', 'FlaxDistilBertModel'),
        ('electra', 'FlaxElectraModel'),
        ('gpt-sw3', 'FlaxGPT2Model'),
        ('gpt2', 'FlaxGPT2Model'),
        ('gpt_neo', 'FlaxGPTNeoModel'),
        ('gptj', 'FlaxGPTJModel'),
        ('longt5', 'FlaxLongT5Model'),
        ('marian', 'FlaxMarianModel'),
        ('mbart', 'FlaxMBartModel'),
        ('mt5', 'FlaxMT5Model'),
        ('opt', 'FlaxOPTModel'),
        ('pegasus', 'FlaxPegasusModel'),
        ('regnet', 'FlaxRegNetModel'),
        ('resnet', 'FlaxResNetModel'),
        ('roberta', 'FlaxRobertaModel'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
        ('roformer', 'FlaxRoFormerModel'),
        ('t5', 'FlaxT5Model'),
        ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
        ('vit', 'FlaxViTModel'),
        ('wav2vec2', 'FlaxWav2Vec2Model'),
        ('whisper', 'FlaxWhisperModel'),
        ('xglm', 'FlaxXGLMModel'),
        ('xlm-roberta', 'FlaxXLMRobertaModel'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for pre-training mapping
        ('albert', 'FlaxAlbertForPreTraining'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForPreTraining'),
        ('big_bird', 'FlaxBigBirdForPreTraining'),
        ('electra', 'FlaxElectraForPreTraining'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
        ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Masked LM mapping
        ('albert', 'FlaxAlbertForMaskedLM'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForMaskedLM'),
        ('big_bird', 'FlaxBigBirdForMaskedLM'),
        ('distilbert', 'FlaxDistilBertForMaskedLM'),
        ('electra', 'FlaxElectraForMaskedLM'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
        ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
        ('encoder-decoder', 'FlaxEncoderDecoderModel'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('marian', 'FlaxMarianMTModel'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('pegasus', 'FlaxPegasusForConditionalGeneration'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Image-classsification
        ('beit', 'FlaxBeitForImageClassification'),
        ('regnet', 'FlaxRegNetForImageClassification'),
        ('resnet', 'FlaxResNetForImageClassification'),
        ('vit', 'FlaxViTForImageClassification'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Vision-to-text (image captioning style) mapping
        ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Causal LM mapping
        ('bart', 'FlaxBartForCausalLM'),
        ('bert', 'FlaxBertForCausalLM'),
        ('big_bird', 'FlaxBigBirdForCausalLM'),
        ('electra', 'FlaxElectraForCausalLM'),
        ('gpt-sw3', 'FlaxGPT2LMHeadModel'),
        ('gpt2', 'FlaxGPT2LMHeadModel'),
        ('gpt_neo', 'FlaxGPTNeoForCausalLM'),
        ('gptj', 'FlaxGPTJForCausalLM'),
        ('opt', 'FlaxOPTForCausalLM'),
        ('roberta', 'FlaxRobertaForCausalLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
        ('xglm', 'FlaxXGLMForCausalLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('albert', 'FlaxAlbertForSequenceClassification'),
        ('bart', 'FlaxBartForSequenceClassification'),
        ('bert', 'FlaxBertForSequenceClassification'),
        ('big_bird', 'FlaxBigBirdForSequenceClassification'),
        ('distilbert', 'FlaxDistilBertForSequenceClassification'),
        ('electra', 'FlaxElectraForSequenceClassification'),
        ('mbart', 'FlaxMBartForSequenceClassification'),
        ('roberta', 'FlaxRobertaForSequenceClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
        ('roformer', 'FlaxRoFormerForSequenceClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Question Answering mapping
        ('albert', 'FlaxAlbertForQuestionAnswering'),
        ('bart', 'FlaxBartForQuestionAnswering'),
        ('bert', 'FlaxBertForQuestionAnswering'),
        ('big_bird', 'FlaxBigBirdForQuestionAnswering'),
        ('distilbert', 'FlaxDistilBertForQuestionAnswering'),
        ('electra', 'FlaxElectraForQuestionAnswering'),
        ('mbart', 'FlaxMBartForQuestionAnswering'),
        ('roberta', 'FlaxRobertaForQuestionAnswering'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
        ('roformer', 'FlaxRoFormerForQuestionAnswering'),
        ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Token Classification mapping
        ('albert', 'FlaxAlbertForTokenClassification'),
        ('bert', 'FlaxBertForTokenClassification'),
        ('big_bird', 'FlaxBigBirdForTokenClassification'),
        ('distilbert', 'FlaxDistilBertForTokenClassification'),
        ('electra', 'FlaxElectraForTokenClassification'),
        ('roberta', 'FlaxRobertaForTokenClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
        ('roformer', 'FlaxRoFormerForTokenClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('albert', 'FlaxAlbertForMultipleChoice'),
        ('bert', 'FlaxBertForMultipleChoice'),
        ('big_bird', 'FlaxBigBirdForMultipleChoice'),
        ('distilbert', 'FlaxDistilBertForMultipleChoice'),
        ('electra', 'FlaxElectraForMultipleChoice'),
        ('roberta', 'FlaxRobertaForMultipleChoice'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
        ('roformer', 'FlaxRoFormerForMultipleChoice'),
        ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Next-sentence-prediction mapping
        ('bert', 'FlaxBertForNextSentencePrediction'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Speech sequence-to-sequence mapping
        ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
    ]
)
_lowerCamelCase = OrderedDict(
    [
        # Audio classification mapping
        ('whisper', 'FlaxWhisperForAudioClassification'),
    ]
)
# Lazy config-class -> model-class mappings built from the name tables above.
# NOTE(review): the FLAX_MODEL_*_MAPPING_NAMES constants referenced below are
# never defined in this chunk (all the OrderedDicts above were bound to the
# single mangled name ``_lowerCamelCase``), so these lines would raise
# NameError at import time — restore the original constant names.
_lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Auto-model facade classes, one per task head.
# NOTE(review): all classes below share the mangled name ``UpperCamelCase_``
# (each definition shadows the previous one), and the ``auto_class_update``
# calls reference the original class names (FlaxAutoModel, …) and the
# FLAX_MODEL_*_MAPPING constants, none of which are defined in this chunk —
# every one of these would raise NameError. Restore the original names.
class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_MAPPING


_lowerCamelCase = auto_class_update(FlaxAutoModel)


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING


_lowerCamelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


_lowerCamelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING


_lowerCamelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


_lowerCamelCase = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


_lowerCamelCase = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


_lowerCamelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


_lowerCamelCase = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


_lowerCamelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


_lowerCamelCase = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


_lowerCamelCase = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


_lowerCamelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')


class UpperCamelCase_ ( _BaseAutoModelClass ):
    lowerCamelCase_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


_lowerCamelCase = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    # DistilBERT tokenizer tests: inherits the full BERT tokenization suite
    # and adds a slow round-trip check of special-token insertion.
    # NOTE(review): results bound to ``_snake_case`` are read back via the
    # original names (``tokenizer``, ``text``, ``text_a``, ``encoded_*``),
    # which are never defined here — the block looks machine-mangled.
    '''simple docstring'''

    UpperCAmelCase__ : int = DistilBertTokenizer
    UpperCAmelCase__ : Union[str, Any] = DistilBertTokenizerFast
    UpperCAmelCase__ : List[str] = True

    @slow
    def UpperCamelCase( self ):
        _snake_case = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        _snake_case = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
        # [CLS] text [SEP] for a single sequence; [CLS] a [SEP] b [SEP] for a pair.
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 672 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# Load the FSMT BLEU validation fixtures shipped with the test data.
# NOTE(review): ``filename`` is never defined — the path above was bound to
# the mangled name ``a``, and the parsed JSON then overwrites ``a`` while the
# test below reads ``bleu_data``; restore the original variable names.
a = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
    a = json.load(f)
@require_torch
class lowercase_ ( unittest.TestCase ):
    # Slow integration test: translate the fixture sentences with the four
    # facebook/wmt19 FSMT checkpoints and assert a minimum BLEU score.
    # NOTE(review): results bound to ``_A`` are read back via the original
    # names (``pair``, ``tokenizer``, ``model``, ``batch``, ``scores``…),
    # none of which are defined — the block looks machine-mangled.
    '''simple docstring'''

    def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[Any] ):
        # Tokenizer factory for a given checkpoint id.
        return FSMTTokenizer.from_pretrained(_UpperCAmelCase )

    def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : int ):
        # Model factory; use fp16 on CUDA to speed up generation.
        _A = FSMTForConditionalGeneration.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ['en-ru', 26.0],
            ['ru-en', 22.0],
            ['en-de', 22.0],
            ['de-en', 29.0],
        ] )
    @slow
    def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        _A = F'''facebook/wmt19-{pair}'''
        _A = self.get_tokenizer(_UpperCAmelCase )
        _A = self.get_model(_UpperCAmelCase )
        _A = bleu_data[pair]['src']
        _A = bleu_data[pair]['tgt']
        _A = tokenizer(_UpperCAmelCase , return_tensors='pt' , truncation=_UpperCAmelCase , padding='longest' ).to(_UpperCAmelCase )
        _A = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        _A = tokenizer.batch_decode(
            _UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
        _A = calculate_bleu(_UpperCAmelCase , _UpperCAmelCase )
        print(_UpperCAmelCase )
        self.assertGreaterEqual(scores['bleu'] , _UpperCAmelCase )
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure for the SwiftFormer config and model modules.
# NOTE(review): the structure and the lists below are all bound to the
# mangled name ``__magic_name__``, yet ``_LazyModule`` at the bottom reads
# ``_import_structure`` — undefined here; restore the original names.
__magic_name__ : Optional[int] = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch not installed: expose only the configuration objects.
    pass
else:
    __magic_name__ : Optional[int] = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy loader.
    import sys

    __magic_name__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# NOTE(review): these aliases are all bound to the same mangled name
# ``lowercase__`` — originally they patched ``data_utils.Vocab`` /
# ``data_utils.Corpus`` and ``sys.modules`` entries so old pickles resolve;
# restore from the original conversion script.
lowercase__ : Optional[int] = data_utils.TransfoXLTokenizer
lowercase__ : str = data_utils.TransfoXLCorpus
lowercase__ : Union[str, Any] = data_utils
lowercase__ : int = data_utils
def _lowerCAmelCase ( __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] ) -> Optional[Any]:
    """Convert a TransfoXL TF checkpoint and/or pre-processed corpus to PyTorch files.

    NOTE(review): this block is machine-mangled — all four parameters share
    one name in the body's references, every result is bound to ``__A`` but
    read back through the original names (``corpus``, ``config``, ``model``,
    ``pytorch_vocab_dump_path``…), none of which are defined. Restore from
    the original conversion script before running.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(__snake_case , 'rb' ) as fp:
            __A : Union[str, Any] = pickle.load(__snake_case , encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        __A : Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(f'Save vocabulary to {pytorch_vocab_dump_path}' )
        __A : Optional[Any] = corpus.vocab.__dict__
        torch.save(__snake_case , __snake_case )
        __A : List[str] = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab' , __snake_case )
        __A : Dict = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(f'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(__snake_case , __snake_case )

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        __A : Any = os.path.abspath(__snake_case )
        __A : List[Any] = os.path.abspath(__snake_case )
        print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            __A : List[str] = TransfoXLConfig()
        else:
            __A : str = TransfoXLConfig.from_json_file(__snake_case )
        print(f'Building PyTorch model from configuration: {config}' )
        __A : Any = TransfoXLLMHeadModel(__snake_case )
        __A : Optional[Any] = load_tf_weights_in_transfo_xl(__snake_case , __snake_case , __snake_case )
        # Save pytorch-model
        __A : Dict = os.path.join(__snake_case , __snake_case )
        __A : Optional[int] = os.path.join(__snake_case , __snake_case )
        print(f'Save PyTorch model to {os.path.abspath(__snake_case )}' )
        torch.save(model.state_dict() , __snake_case )
        print(f'Save configuration file to {os.path.abspath(__snake_case )}' )
        with open(__snake_case , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI entry point: parse the four conversion paths and run the converter.
    lowercase__ : List[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=str,
        required=True,
        help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
    )
    parser.add_argument(
        '''--tf_checkpoint_path''',
        default='''''',
        type=str,
        help='''An optional path to a TensorFlow checkpoint path to be converted.''',
    )
    parser.add_argument(
        '''--transfo_xl_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--transfo_xl_dataset_file''',
        default='''''',
        type=str,
        help='''An optional dataset file to be converted in a vocabulary.''',
    )
    # NOTE(review): ``parser`` / ``args`` / ``convert_transfo_xl_checkpoint_to_pytorch``
    # are undefined here — the parser was bound to ``lowercase__`` and the
    # converter was renamed ``_lowerCAmelCase`` by the mangling; restore.
    lowercase__ : int = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the conversion script (name mangled by the tooling).
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
def snake_case_ ( config , has_lm_head=False , is_semantic=False ):
    """Build the (old_key, new_key) rename table for a BEiT/DiT checkpoint.

    The original signature repeated one mangled parameter name (a
    SyntaxError) and the locals ``prefix``/``rename_keys`` were destroyed;
    the body's own references (``config.num_hidden_layers``, ``has_lm_head``,
    ``is_semantic``, ``{prefix}`` in the f-strings, ``rename_keys.append``)
    ground this restoration. Call sites use positional/keyword args, so the
    interface is compatible.

    config: object exposing ``num_hidden_layers``.
    has_lm_head: include mask-token/layernorm keys instead of the classifier head.
    is_semantic: checkpoint keys are prefixed with ``backbone.``.
    Returns a list of (source_key, destination_key) string tuples.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f'''{prefix}cls_token''', "beit.embeddings.cls_token"),
            (f'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
            (f'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
            (f'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
        ] )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )

    return rename_keys
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
    '''simple docstring'''
    # Splits each layer's fused qkv projection (and the gamma scale params)
    # out of the original checkpoint into separate HF-style entries.
    # NOTE(review): heavily mangled — the signature repeats one parameter
    # name (a SyntaxError), the body reads ``config``/``state_dict``/
    # ``is_semantic`` which are never bound, and every destination
    # ``state_dict[...] = ...`` assignment was collapsed into
    # ``_snake_case = ...`` so the target key names are lost. Restore from
    # the original BEiT/DiT conversion script; do not guess the key names.
    for i in range(config.num_hidden_layers ):
        _snake_case = "backbone." if is_semantic else ""
        # queries, keys and values
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
        # First hidden_size rows of the fused matrix are the query weights.
        _snake_case = in_proj_weight[
            : config.hidden_size, :
        ]
        _snake_case = q_bias
        # Middle rows: key weights (keys have no bias in BEiT).
        _snake_case = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        # Last hidden_size rows: value weights.
        _snake_case = in_proj_weight[
            -config.hidden_size :, :
        ]
        _snake_case = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        _snake_case = gamma_a
        _snake_case = gamma_a
def snake_case_ ( dct , old_key , new_key ):
    """Move ``dct[old_key]`` to ``dct[new_key]`` in place.

    The original signature repeated one mangled parameter name (a
    SyntaxError) and the second statement discarded the popped value
    instead of storing it under the new key; both are restored here.
    Raises KeyError if ``old_key`` is absent, matching dict.pop.
    """
    val = dct.pop(old_key )
    dct[new_key] = val
def snake_case_ ( ):
    """Download and return the standard COCO val2017 test image (two cats).

    The original body referenced the undefined mangled name for both the
    url and the ``stream`` flag; restored to fetch the url defined on the
    first line. Performs network I/O via ``requests`` and decodes with PIL.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the raw response body.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
    '''simple docstring'''
    # End-to-end DiT checkpoint conversion: download the original weights,
    # rename/split them into the HF BEiT layout, sanity-check a forward pass
    # on a test image, save model + image processor, optionally push to hub.
    # NOTE(review): heavily mangled — the signature repeats one parameter
    # name (a SyntaxError) and every result bound to ``_snake_case`` is read
    # back through the original names (``has_lm_head``, ``config``,
    # ``state_dict``, ``model``, ``image_processor``, ``logits``…), none of
    # which are defined. Restore from the original conversion script.
    _snake_case = False if "rvlcdip" in checkpoint_url else True
    _snake_case = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        _snake_case = 10_24
        _snake_case = 40_96
        _snake_case = 24
        _snake_case = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        _snake_case = 16
        _snake_case = "huggingface/label-files"
        _snake_case = "rvlcdip-id2label.json"
        _snake_case = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="dataset" ) , "r" ) )
        _snake_case = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
        _snake_case = idalabel
        _snake_case = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    _snake_case = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="cpu" )["model"]
    _snake_case = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
    # load HuggingFace model
    _snake_case = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ )
    model.eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE__ )
    # Check outputs on an image
    _snake_case = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ )
    _snake_case = prepare_img()
    _snake_case = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
    _snake_case = encoding["pixel_values"]
    _snake_case = model(SCREAMING_SNAKE_CASE__ )
    _snake_case = outputs.logits
    # verify logits
    _snake_case = [1, 16] if "rvlcdip" in checkpoint_url else [1, 1_96, 81_92]
    assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected"
    Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(SCREAMING_SNAKE_CASE__ )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
    if push_to_hub:
        if has_lm_head:
            _snake_case = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            _snake_case = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
        model.push_to_hub(
            repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
    # CLI entry point: parse checkpoint url / output folder / hub flag and
    # run the DiT checkpoint conversion.
    __magic_name__ : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    # NOTE(review): ``parser``/``args``/``convert_dit_checkpoint`` are
    # undefined here — the parser was bound to ``__magic_name__`` and the
    # converter was renamed ``snake_case_`` by the mangling; restore.
    __magic_name__ : Dict = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 672 | 0 |
from datetime import datetime
import requests
def A ( __UpperCamelCase ) -> bytes:
    """Download the raw bytes of an Instagram video/IGTV post.

    Resolves the direct video source via the downloadgram JSON API, then
    fetches and returns the video content.  Fixed: the original bound both
    intermediate values to `A__` while reading the undefined names
    `base_url`/`url` (NameError).
    """
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    # First request returns JSON metadata; the direct video URL lives at
    # [0]['urls'][0]['src'] — assumes the API keeps this response shape.
    video_src = requests.get(base_url + __UpperCamelCase ).json()[0]['urls'][0]['src']
    return requests.get(video_src ).content
if __name__ == "__main__":
    # Prompt for the post URL and save the downloaded video as a timestamped .mp4.
    # Fixed: the obfuscated original assigned both values to
    # SCREAMING_SNAKE_CASE__ (clobbering the URL) and then called the
    # undefined names `download_video`/`url`/`file_name`.
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
    with open(file_name, '''wb''') as fp:
        fp.write(A(url))
    print(f'Done. Video saved to disk as {file_name}.')
| 9 |
'''simple docstring'''
import math
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    """Return True if the non-negative integer is prime, else False.

    Fixed defects from the obfuscated original: `isinstance(x, x)` raised
    TypeError for any non-type argument (should test against `int`), and the
    body read the undefined names `number`/`odd_numbers`.
    """
    assert isinstance(SCREAMING_SNAKE_CASE__ , int ) and (
        SCREAMING_SNAKE_CASE__ >= 0
    ), "'number' must been an int and positive"

    if 1 < SCREAMING_SNAKE_CASE__ < 4:
        # 2 and 3 are primes
        return True
    elif SCREAMING_SNAKE_CASE__ < 2 or not SCREAMING_SNAKE_CASE__ % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    # Only odd divisors up to sqrt(n) need to be checked.
    odd_numbers = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) ) + 1 , 2 )
    return not any(not SCREAMING_SNAKE_CASE__ % i for i in odd_numbers )
def snake_case_ ( value , factor=1 , **kwargs ):
    """Return the first prime at or after ``factor * value``.

    Pass ``desc=True`` to search downward instead of upward.  Fixed defects
    from the obfuscated original: all three parameters shared one name (a
    SyntaxError), `first_value_val` was never assigned, and the calls
    targeted the undefined names `is_prime`/`next_prime`.
    """

    def _is_prime ( number ):
        # Local primality test: the sibling checker in this module shares this
        # function's (obfuscated) name, so it cannot be called by name here.
        if 1 < number < 4:
            return True
        if number < 2 or not number % 2:
            return False
        return not any(not number % i for i in range(3 , int(math.sqrt(number ) ) + 1 , 2 ) )

    value = factor * value
    first_value_val = value
    while not _is_prime(value ):
        # Search direction: down when desc=True was requested, otherwise up.
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return snake_case_(value + 1 , **kwargs )
    return value
| 672 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __lowercase ):
    """Unit tests for `PNDMScheduler` (PRK and PLMS stepping).

    NOTE(review): the automated rename damaged this class and it will not run
    as-is: results are bound to the single name `_UpperCamelCase` while later
    lines read the original descriptive names (`config`, `scheduler`,
    `new_scheduler`, `residual`, `sample`, `dummy_past_residuals`, `output`,
    `new_output`, `model`, `num_inference_steps`, ...), which are undefined;
    two signatures repeat the parameter name `_A` (`_A , **_A` — a
    SyntaxError); and every method shares the name `UpperCamelCase_`, so only
    the last definition would survive class creation.  Distinct names must be
    restored before these tests can execute.
    """
    # Scheduler classes under test and default forward kwargs for the mixin.
    # NOTE(review): both attributes are bound to the same obfuscated name, so
    # the second assignment overwrites the first.
    UpperCAmelCase = (PNDMScheduler,)
    UpperCAmelCase = (("num_inference_steps", 50),)
    # Build a default scheduler config, with keyword overrides applied.
    def UpperCamelCase_ ( self : str , **_A : Dict ):
        _UpperCamelCase = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**_A )
        return config
    # Round-trip a scheduler through save_config/from_pretrained and check
    # PRK/PLMS step outputs are identical before and after.
    def UpperCamelCase_ ( self : int , _A : Tuple=0 , **_A : Optional[int] ):
        _UpperCamelCase = dict(self.forward_default_kwargs )
        _UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
        _UpperCamelCase = self.dummy_sample
        _UpperCamelCase = 0.1 * sample
        _UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _UpperCamelCase = self.get_scheduler_config(**_A )
            _UpperCamelCase = scheduler_class(**_A )
            scheduler.set_timesteps(_A )
            # copy over dummy past residuals
            _UpperCamelCase = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_A )
                _UpperCamelCase = scheduler_class.from_pretrained(_A )
                new_scheduler.set_timesteps(_A )
                # copy over dummy past residuals
                _UpperCamelCase = dummy_past_residuals[:]
            _UpperCamelCase = scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
            _UpperCamelCase = new_scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            _UpperCamelCase = scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
            _UpperCamelCase = new_scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    # Intentionally disabled base-mixin hook.
    def UpperCamelCase_ ( self : Optional[Any] ):
        pass
    # Same save/load round-trip check, but without config overrides.
    def UpperCamelCase_ ( self : List[str] , _A : str=0 , **_A : str ):
        _UpperCamelCase = dict(self.forward_default_kwargs )
        _UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
        _UpperCamelCase = self.dummy_sample
        _UpperCamelCase = 0.1 * sample
        _UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            _UpperCamelCase = self.get_scheduler_config()
            _UpperCamelCase = scheduler_class(**_A )
            scheduler.set_timesteps(_A )
            # copy over dummy past residuals (must be after setting timesteps)
            _UpperCamelCase = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(_A )
                _UpperCamelCase = scheduler_class.from_pretrained(_A )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(_A )
                # copy over dummy past residual (must be after setting timesteps)
                _UpperCamelCase = dummy_past_residuals[:]
            _UpperCamelCase = scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
            _UpperCamelCase = new_scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            _UpperCamelCase = scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
            _UpperCamelCase = new_scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    # Run a short full denoising loop (PRK then PLMS phases) and return the
    # final sample for the numeric regression checks below.
    def UpperCamelCase_ ( self : str , **_A : Any ):
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config(**_A )
        _UpperCamelCase = scheduler_class(**_A )
        _UpperCamelCase = 10
        _UpperCamelCase = self.dummy_model()
        _UpperCamelCase = self.dummy_sample_deter
        scheduler.set_timesteps(_A )
        for i, t in enumerate(scheduler.prk_timesteps ):
            _UpperCamelCase = model(_A , _A )
            _UpperCamelCase = scheduler.step_prk(_A , _A , _A ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            _UpperCamelCase = model(_A , _A )
            _UpperCamelCase = scheduler.step_plms(_A , _A , _A ).prev_sample
        return sample
    # Two consecutive PRK/PLMS steps must preserve the sample shape.
    def UpperCamelCase_ ( self : Dict ):
        _UpperCamelCase = dict(self.forward_default_kwargs )
        _UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
        for scheduler_class in self.scheduler_classes:
            _UpperCamelCase = self.get_scheduler_config()
            _UpperCamelCase = scheduler_class(**_A )
            _UpperCamelCase = self.dummy_sample
            _UpperCamelCase = 0.1 * sample
            if num_inference_steps is not None and hasattr(_A , '''set_timesteps''' ):
                scheduler.set_timesteps(_A )
            elif num_inference_steps is not None and not hasattr(_A , '''set_timesteps''' ):
                _UpperCamelCase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            _UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            _UpperCamelCase = dummy_past_residuals[:]
            _UpperCamelCase = scheduler.step_prk(_A , 0 , _A , **_A ).prev_sample
            _UpperCamelCase = scheduler.step_prk(_A , 1 , _A , **_A ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            _UpperCamelCase = scheduler.step_plms(_A , 0 , _A , **_A ).prev_sample
            _UpperCamelCase = scheduler.step_plms(_A , 1 , _A , **_A ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
    # Config sweeps: train timesteps, steps_offset, betas, schedules, etc.
    def UpperCamelCase_ ( self : Any ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=_A )
    def UpperCamelCase_ ( self : Tuple ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=_A )
        _UpperCamelCase = self.scheduler_classes[0]
        _UpperCamelCase = self.get_scheduler_config(steps_offset=1 )
        _UpperCamelCase = scheduler_class(**_A )
        scheduler.set_timesteps(10 )
        # With steps_offset=1 every timestep is shifted up by one.
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def UpperCamelCase_ ( self : List[str] ):
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=_A , beta_end=_A )
    def UpperCamelCase_ ( self : str ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_A )
    def UpperCamelCase_ ( self : Dict ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=_A )
    def UpperCamelCase_ ( self : Union[str, Any] ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=_A )
    def UpperCamelCase_ ( self : Tuple ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=_A )
    def UpperCamelCase_ ( self : Dict ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        _UpperCamelCase = 27
        for scheduler_class in self.scheduler_classes:
            _UpperCamelCase = self.dummy_sample
            _UpperCamelCase = 0.1 * sample
            _UpperCamelCase = self.get_scheduler_config()
            _UpperCamelCase = scheduler_class(**_A )
            scheduler.set_timesteps(_A )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                _UpperCamelCase = scheduler.step_prk(_A , _A , _A ).prev_sample
    # Calling step_plms before set_timesteps must raise.
    def UpperCamelCase_ ( self : Union[str, Any] ):
        with self.assertRaises(_A ):
            _UpperCamelCase = self.scheduler_classes[0]
            _UpperCamelCase = self.get_scheduler_config()
            _UpperCamelCase = scheduler_class(**_A )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    # Numeric regression checks on the full loop output statistics.
    def UpperCamelCase_ ( self : Any ):
        _UpperCamelCase = self.full_loop()
        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3
    def UpperCamelCase_ ( self : List[Any] ):
        _UpperCamelCase = self.full_loop(prediction_type='''v_prediction''' )
        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3
    def UpperCamelCase_ ( self : str ):
        # We specify different beta, so that the first alpha is 0.99
        _UpperCamelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
    def UpperCamelCase_ ( self : Any ):
        # We specify different beta, so that the first alpha is 0.99
        _UpperCamelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
        _UpperCamelCase = torch.sum(torch.abs(_A ) )
        _UpperCamelCase = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 10 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: maps submodule name -> public names it provides.
# Fixed: the obfuscated original bound this dict to `__magic_name__`, but the
# `_LazyModule(...)` call at the bottom requires the conventional
# `_import_structure`; the optional-dependency branches and the final lazy
# module installation were likewise broken (names restored to match the
# declared strings).
_import_structure = {
    """configuration_pix2struct""": [
        """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Pix2StructConfig""",
        """Pix2StructTextConfig""",
        """Pix2StructVisionConfig""",
    ],
    """processing_pix2struct""": ["""Pix2StructProcessor"""],
}

# Vision-dependent symbols are only exposed when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""image_processing_pix2struct"""] = ["""Pix2StructImageProcessor"""]

# Torch-dependent modeling symbols.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_pix2struct"""] = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; the module and symbol names
    # must match the keys/strings declared in `_import_structure` above.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # Install a lazy proxy module so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCAmelCase (flax_key_tuple , flax_tensor):
    """Map a flax parameter (key tuple, tensor) pair to its PyTorch layout.

    Kernels become `weight` entries: 3-D expert kernels are permuted to
    (experts, out, in); 2-D linear kernels are transposed; `scale`/`embedding`
    leaves are renamed without a data change.  Fixed: the obfuscated original
    repeated the parameter name `__A` (a SyntaxError) and read the undefined
    names `flax_key_tuple`/`flax_tensor`.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
def lowerCAmelCase (layer , checkpoint_info , switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, leaf key(s), content).

    `metadata`/`kvstore` keys are regrouped under their owning layer; for
    `kvstore/path` entries the content is resolved relative to
    `switch_checkpoint_path`, and `kvstore/driver` is forced to the local
    `file` driver.  Fixed: the obfuscated original repeated the parameter name
    `__A` (a SyntaxError) and read undefined local names.
    """
    if "metadata" in layer:
        split_layer = layer.split('''metadata''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        split_layer = layer.split('''/''')
        curr_real_layer_name = '''/'''.join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        # Re-anchor the tensorstore path to the local checkpoint directory.
        content = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def lowerCAmelCase (current_block , save_path):
    """Rename a shard's keys via `rename_keys` and save it with `torch.save`.

    Fixed: the obfuscated original repeated the parameter name `__A`
    (a SyntaxError) and collapsed the rebuilt-dict locals into `_a`.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE(review): the key transform was lost in obfuscation (`_a = v`);
        # upstream converts '/'-joined flax keys to '.'-joined state-dict
        # keys here — confirm against the companion conversion script.
        new_current_block[k.replace('''/''' , '''.''')] = v
    current_block = new_current_block
    torch.save(current_block , save_path)
def lowerCAmelCase (switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name = WEIGHTS_NAME):
    """Convert a T5X SwitchTransformers checkpoint into sharded PyTorch weights.

    Reads the msgpack checkpoint, streams each tensor out of tensorstore,
    renames it to the PyTorch layout, and accumulates tensors into shards no
    larger than `max_shard_size`.  Returns `(metadata, index)`; when a single
    shard suffices the index is None.

    Fixed: the obfuscated original repeated the parameter name `__A`
    (a SyntaxError) and collapsed distinct locals into `_a`; names restored
    from the surviving right-hand-side references.

    NOTE(review): this body calls `get_key_and_tensorstore_dict`,
    `rename_base_flax_keys` and `rename_and_save_block` as in the original —
    those helpers are defined above under obfuscated names, so the bindings
    must be restored for this to run.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info , sep='''/''')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key_tuple , raw_weights = rename_base_flax_keys(tuple(key.split('''/''')) , raw_weights)
        key = '''/'''.join(key_tuple)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            shard_file = os.path.join(
                dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
            rename_and_save_block(current_block , shard_file)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    shard_file = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{len(sharded_state_dicts)+1:05d}-of-???.bin'''))
    rename_and_save_block(current_block , shard_file)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '''.bin''' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin''')  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin'''))
        os.rename(temp_filename , os.path.join(dump_path , shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME) , '''w''' , encoding='''utf-8''') as f:
        content = json.dumps(index , indent=2 , sort_keys=True) + '''\n'''
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    # CLI entry point for the SwitchTransformers checkpoint sharding script.
    # Fixed: parser/args were bound to `lowercase_` while the code read
    # `parser`/`args`, and the attribute read `switch_tax_checkpoint_path`
    # did not match the declared `--switch_t5x_checkpoint_path` flag.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    # NOTE(review): `shard_on_the_fly` is the sharding function defined above
    # (renamed by obfuscation) — confirm its binding before running.
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def lowerCAmelCase ():
    """Sanity-check generation with a small converted SwitchTransformers model.

    Downloads google/switch-base-8, round-trips it through a local save,
    then runs a masked-span generation and prints the decoded output.
    Fixed: the obfuscated original bound every result to `_a` while reading
    the undefined names `config`/`model`/`tokenizer`/`__A`.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''')
    tokenizer = TaTokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text , return_tensors='''pt''').input_ids
    out = model.generate(input_ids , decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 11 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger (bound to an obfuscated name; not referenced in the visible code).
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    """Image processor: optional shortest-edge resize, center crop, rescale and
    normalize, returning a `BatchFeature` of `pixel_values`.

    Fixed: the obfuscated original reused the parameter name `lowerCamelCase`
    for every argument (a SyntaxError), bound all results to `_snake_case`
    so no `self.*` attribute was ever set, and named every method
    `UpperCamelCase` even though `preprocess` calls `self.resize` /
    `self.center_crop` / `self.rescale` / `self.normalize`.  Names restored
    from those call sites and the attribute reads.
    """

    UpperCAmelCase__ : List[str] = ['''pixel_values''']

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        # Defaults: shortest edge 256 for resize, 224x224 center crop.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        """Resize `image` so its shortest edge equals `size["shortest_edge"]`."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs , ):
        """Center-crop `image` to `size["height"] x size["width"]`."""
        size = get_size_dict(size )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """Apply the configured transform pipeline to one or more images."""
        # Per-call arguments override the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 672 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch ops deterministic so the pixel-value regression asserts below are stable.
enable_full_determinism()
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = UnCLIPImageVariationPipeline
__lowerCAmelCase : List[Any] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
__lowerCAmelCase : Optional[int] = IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase : Tuple = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
__lowerCAmelCase : int = False
    @property
    def lowercase__ ( self):
        """Fixed dummy dimension used by the test components (32).

        NOTE(review): all properties/methods in this class share the
        obfuscated name `lowercase__`, so only the last definition survives.
        """
        return 32
    @property
    def lowercase__ ( self):
        """Second fixed dummy dimension for the test components (32)."""
        return 32
    @property
    def lowercase__ ( self):
        """Alias of `time_input_dim` — presumably the cross-attention dim; confirm against the original."""
        return self.time_input_dim
    @property
    def lowercase__ ( self):
        """Time-embedding dimension: four times `time_input_dim`."""
        return self.time_input_dim * 4
    @property
    def lowercase__ ( self):
        """Fixed dummy count (100) — likely a timestep-related size; confirm against the original."""
        return 1_00
    @property
    def lowercase__ ( self):
        """Tiny random CLIP tokenizer for the dummy pipeline.

        NOTE(review): the result is bound to `lowercase__` but the return
        reads the undefined name `tokenizer` — restore the binding.
        """
        lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        return tokenizer
    @property
    def lowercase__ ( self):
        """Seeded dummy CLIP text encoder built from a tiny config.

        NOTE(review): the config is bound to `lowercase__` while the
        constructor is called with the undefined `SCREAMING_SNAKE_CASE_`.
        """
        torch.manual_seed(0)
        lowercase__ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_)
    @property
    def lowercase__ ( self):
        """Seeded dummy CLIP vision encoder built from a tiny config.

        NOTE(review): same broken binding pattern as the text encoder above.
        """
        torch.manual_seed(0)
        lowercase__ : Any = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
        return CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE_)
    @property
    def lowercase__ ( self):
        """Seeded dummy UnCLIP text-projection model.

        NOTE(review): the kwargs dict and the constructed model are both
        bound to `lowercase__`, yet the constructor reads the undefined
        `SCREAMING_SNAKE_CASE_` and the return reads the undefined `model`.
        """
        torch.manual_seed(0)
        lowercase__ : List[Any] = {
            """clip_embeddings_dim""": self.text_embedder_hidden_size,
            """time_embed_dim""": self.time_embed_dim,
            """cross_attention_dim""": self.cross_attention_dim,
        }
        lowercase__ : List[str] = UnCLIPTextProjModel(**SCREAMING_SNAKE_CASE_)
        return model
    @property
    def lowercase__ ( self):
        """Seeded dummy conditional UNet used as the UnCLIP decoder.

        Predicts mean and variance, hence out_channels = 2 * in_channels.
        NOTE(review): same broken `lowercase__`/`SCREAMING_SNAKE_CASE_`/`model`
        binding pattern as the other component properties.
        """
        torch.manual_seed(0)
        lowercase__ : List[str] = {
            """sample_size""": 32,
            # RGB in channels
            """in_channels""": 3,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 6,
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": """identity""",
        }
        lowercase__ : Optional[int] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_)
        return model
    @property
    def lowercase__ ( self):
        """Shared kwargs for the two super-resolution UNets below.

        NOTE(review): reads `self.block_out_channels_a`, which no surviving
        member defines — presumably one of the 32-valued properties above
        before obfuscation; confirm against the original.
        """
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }
    @property
    def lowercase__ ( self):
        """Seeded (seed 0) super-resolution UNet for the first SR pass.

        NOTE(review): result bound to `lowercase__` but return reads `model`.
        """
        torch.manual_seed(0)
        lowercase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs)
        return model
    @property
    def lowercase__ ( self):
        """Differently seeded (seed 1) super-resolution UNet for the last SR step.

        NOTE(review): result bound to `lowercase__` but return reads `model`.
        """
        torch.manual_seed(1)
        lowercase__ : Any = UNetaDModel(**self.dummy_super_res_kwargs)
        return model
    def lowercase__ ( self):
        """Assemble the full dict of dummy components for the pipeline ctor.

        NOTE(review): every component is bound to `lowercase__` while the
        returned dict reads the original descriptive names (`decoder`,
        `text_proj`, ...), which are undefined here — restore the bindings.
        """
        lowercase__ : int = self.dummy_decoder
        lowercase__ : Any = self.dummy_text_proj
        lowercase__ : Optional[int] = self.dummy_text_encoder
        lowercase__ : Optional[int] = self.dummy_tokenizer
        lowercase__ : Tuple = self.dummy_super_res_first
        lowercase__ : List[Any] = self.dummy_super_res_last
        lowercase__ : Optional[Any] = UnCLIPScheduler(
            variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=10_00 , )
        lowercase__ : Dict = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=10_00 , )
        lowercase__ : Tuple = CLIPImageProcessor(crop_size=32 , size=32)
        lowercase__ : List[str] = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=True):
        """Build dummy pipeline inputs: a random image (optionally converted
        to PIL), a seeded generator, and short inference-step counts.

        NOTE(review): the signature repeats the parameter name
        `SCREAMING_SNAKE_CASE_` three times — a SyntaxError — and the body
        binds to `lowercase__` while reading the undefined originals
        (`input_image`, `generator`); restore (device, seed=0, pil_image=True).
        """
        lowercase__ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_)).to(SCREAMING_SNAKE_CASE_)
        if str(SCREAMING_SNAKE_CASE_).startswith("""mps"""):
            # mps requires a CPU-side generator
            lowercase__ : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_)
        else:
            lowercase__ : int = torch.Generator(device=SCREAMING_SNAKE_CASE_).manual_seed(SCREAMING_SNAKE_CASE_)
        if pil_image:
            # Map [-1, 1] tensors into [0, 1] and convert to PIL.
            lowercase__ : Any = input_image * 0.5 + 0.5
            lowercase__ : Any = input_image.clamp(0 , 1)
            lowercase__ : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
            lowercase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(SCREAMING_SNAKE_CASE_)[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def lowercase__ ( self):
        """CPU smoke test with a PIL input: pipeline output and tuple output
        must match a pinned 3x3 pixel slice within 1e-2.

        NOTE(review): body binds everything to `lowercase__` while reading
        the undefined originals (`pipe`, `output`, `image`, ...), and the
        `pil_image=...` kwargs read the undefined `SCREAMING_SNAKE_CASE_`.
        """
        lowercase__ : Tuple = """cpu"""
        lowercase__ : List[str] = self.get_dummy_components()
        lowercase__ : int = self.pipeline_class(**SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE_)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_)
        lowercase__ : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_)
        lowercase__ : Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = output.images
        lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_)
        lowercase__ : Dict = pipe(
            **SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
        lowercase__ : Any = image[0, -3:, -3:, -1]
        lowercase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        # Pinned regression values for the deterministic dummy components.
        lowercase__ : str = np.array(
            [
                0.9_9_9_7,
                0.0_0_0_2,
                0.9_9_9_7,
                0.9_9_9_7,
                0.9_9_6_9,
                0.0_0_2_3,
                0.9_9_9_7,
                0.9_9_6_9,
                0.9_9_7_0,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
    def lowercase__ ( self):
        """Same CPU smoke test with a tensor (non-PIL) input and a different
        pinned slice.

        NOTE(review): same broken `lowercase__`/undefined-name binding
        pattern as the previous test.
        """
        lowercase__ : List[str] = """cpu"""
        lowercase__ : Optional[int] = self.get_dummy_components()
        lowercase__ : List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE_)
        lowercase__ : List[Any] = pipe.to(SCREAMING_SNAKE_CASE_)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_)
        lowercase__ : Optional[int] = pipe(**SCREAMING_SNAKE_CASE_)
        lowercase__ : int = output.images
        lowercase__ : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ , pil_image=SCREAMING_SNAKE_CASE_)
        lowercase__ : int = pipe(
            **SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
        lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
        lowercase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        lowercase__ : str = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
def test_unclip_image_variation_input_list_images(self):
    """A list of two images must produce a batch of two outputs matching the reference slice."""
    device = "cpu"
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=None)

    pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
    # Duplicate the single dummy image to exercise the list-of-images input path.
    pipeline_inputs["image"] = [
        pipeline_inputs["image"],
        pipeline_inputs["image"],
    ]
    output = pipe(**pipeline_inputs)
    image = output.images

    tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
    tuple_pipeline_inputs["image"] = [
        tuple_pipeline_inputs["image"],
        tuple_pipeline_inputs["image"],
    ]
    image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

    image_slice = image[0, -3:, -3:, -1]
    image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

    assert image.shape == (2, 64, 64, 3)

    expected_slice = np.array(
        [
            0.9997,
            0.9989,
            0.0008,
            0.0021,
            0.9960,
            0.0018,
            0.0014,
            0.0002,
            0.9933,
        ])

    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_unclip_passed_image_embed(self):
    """Passing image_embeddings directly must match passing the image, given fixed latents."""
    device = torch.device("cpu")

    class DummyScheduler:
        # prepare_latents multiplies initial noise by this factor — 1 keeps it unchanged.
        # NOTE(review): name reconstructed from the `scheduler=DummyScheduler()` call sites below.
        init_noise_sigma = 1

    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)
    pipe = pipe.to(device)
    pipe.set_progress_bar_config(disable=None)

    generator = torch.Generator(device=device).manual_seed(0)
    dtype = pipe.decoder.dtype
    batch_size = 1

    shape = (
        batch_size,
        pipe.decoder.config.in_channels,
        pipe.decoder.config.sample_size,
        pipe.decoder.config.sample_size,
    )
    decoder_latents = pipe.prepare_latents(
        shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())

    shape = (
        batch_size,
        pipe.super_res_first.config.in_channels // 2,
        pipe.super_res_first.config.sample_size,
        pipe.super_res_first.config.sample_size,
    )
    super_res_latents = pipe.prepare_latents(
        shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())

    pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

    img_out_1 = pipe(
        **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images

    pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
    # Don't pass image, instead pass embedding
    image = pipeline_inputs.pop("image")
    image_embeddings = pipe.image_encoder(image).image_embeds

    img_out_2 = pipe(
        **pipeline_inputs,
        decoder_latents=decoder_latents,
        super_res_latents=super_res_latents,
        image_embeddings=image_embeddings,
    ).images

    # make sure passing the image embeddings manually is identical to passing the image
    # (the original compared a variable against itself, which is vacuously true)
    assert np.abs(img_out_1 - img_out_2).max() < 1e-4
@skip_mps
def test_attention_slicing_forward_pass(self):
    """Run the common attention-slicing check (exact compare only on CPU)."""
    test_max_difference = torch_device == "cpu"
    # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
    expected_max_diff = 1e-2
    self._test_attention_slicing_forward_pass(
        test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)
@skip_mps
def test_inference_batch_single_identical(self):
    """Batched inference must match single inference; step counts are copied per batch item."""
    test_max_difference = torch_device == "cpu"
    relax_max_difference = True
    additional_params_copy_to_batched_inputs = [
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    self._test_inference_batch_single_identical(
        test_max_difference=test_max_difference,
        relax_max_difference=relax_max_difference,
        additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
    )
def test_inference_batch_consistent(self):
    """Outputs must be consistent across batch sizes (reduced sizes on MPS)."""
    additional_params_copy_to_batched_inputs = [
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    if torch_device == "mps":
        # TODO: MPS errors with larger batch sizes
        batch_sizes = [2, 3]
        self._test_inference_batch_consistent(
            batch_sizes=batch_sizes,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )
    else:
        self._test_inference_batch_consistent(
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)
@skip_mps
def test_dict_tuple_outputs_equivalent(self):
    """Delegate to the common mixin check; skipped on MPS."""
    return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def test_save_load_local(self):
    """Delegate to the common save/load round-trip check; skipped on MPS."""
    return super().test_save_load_local()
@skip_mps
def test_save_load_optional_components(self):
    """Delegate to the common optional-components save/load check; skipped on MPS."""
    return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _snake_case(unittest.TestCase):
    """Integration test for the karlo image-variation checkpoint (GPU only)."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy")

        # `torch.floataa` does not exist — fp16 matches the reference .npy above.
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 12 |
"""Base85 (Ascii85) encode/decode helpers.

The original imported the non-existent module ``baseaa`` (mangled ``base64``),
referenced an undefined name ``string`` instead of its parameter, and gave both
functions the same name so the second shadowed the first.
"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode *string* (UTF-8) to Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 *a85encoded* bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 672 | 0 |
"""Lazy import structure for the M-CTC-T model.

Fixes: the submodule mapping was bound to a throwaway name while the undefined
``_import_structure`` was passed to ``_LazyModule``; the torch branch rebound a
list instead of adding the ``"modeling_mctct"`` key; the lazy module was
assigned to a throwaway name instead of being installed via ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exposed when torch is installed.
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 672 | 0 |
class UpperCAmelCase_:
    """An item with a name, a value and a weight (greedy-knapsack demo).

    Fixes: ``__init__`` declared three parameters all named ``_a`` (a
    SyntaxError), and all four accessors were named ``__lowercase`` so only the
    last survived while callers use ``get_value``/``get_weight``.
    """

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # Value density: value per unit of weight.
        return self.value / self.weight


# Backward-compatible alias: other helpers in this module refer to ``Things``.
Things = UpperCAmelCase_
def __UpperCAmelCase(name, value, weight):
    """Build a list of items from parallel ``name``/``value``/``weight`` lists.

    Fixes: all three parameters were named ``__a`` (a SyntaxError) and the body
    referenced the undefined names ``menu`` and ``Things``.
    """
    menu = []
    for i in range(len(name)):
        menu.append(UpperCAmelCase_(name[i], value[i], weight[i]))
    return menu
def __UpperCAmelCase(items, max_cost, key_func):
    """Greedy knapsack: take items in descending ``key_func`` order while they fit in ``max_cost``.

    Returns a ``(chosen_items, total_value)`` tuple.

    Fixes: duplicate ``__a`` parameters (SyntaxError) and ``total_cost``/
    ``total_value`` being referenced without ever being bound (both halves of
    the tuple assignment targeted the same throwaway name).
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def __UpperCAmelCase ( ) -> int:
    """simple docstring"""
    # NOTE(review): empty stub — annotated ``-> int`` but has no body/return
    # (returns None at runtime); confirm the intended implementation.
if __name__ == "__main__":
    import doctest
    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 14 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam builder with a flat string feature, for testing.

    Renamed from a duplicated placeholder name: the test class below
    instantiates ``DummyBeamDataset``. Method signatures had duplicate
    parameter names (a SyntaxError); hook names follow the
    ``datasets.BeamBasedBuilder`` API.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam builder with a nested sequence feature, for testing.

    Renamed from a duplicated placeholder name: the test class below
    instantiates ``NestedBeamDataset``. Method signatures had duplicate
    parameter names (a SyntaxError); hook names follow the
    ``datasets.BeamBasedBuilder`` API.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return three (key, example) pairs with a flat string feature.

    Renamed from a duplicated placeholder: the builders and tests in this file
    call ``get_test_dummy_examples``.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return three (key, example) pairs with a nested ``{"a": {"b": [...]}}`` feature.

    Renamed from a duplicated placeholder: the builders and tests in this file
    call ``get_test_nested_examples``.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for the Beam-based builders using the local DirectRunner.

    Fixes: the base class was an undefined placeholder (``TestCase`` is
    imported at the top of the file), every local was bound to a throwaway
    name while later lines referenced the real names, and the sharded test
    checked shard 0 twice instead of shards 0 and 1.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while still writing real parquet files.
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0",
                        f"{builder.name}-train-00000-of-00002.arrow")))
            # Second shard (the original asserted shard 0 twice).
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0",
                        f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam_runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 672 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A(PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for StableUnCLIPPipeline built from tiny dummy components.

    Fixes: the bases were an undefined placeholder repeated three times (the
    three tester mixins are imported above); class attributes and every local
    were bound to throwaway names while later lines referenced the real names;
    ``get_dummy_inputs`` declared two parameters with the same name (a
    SyntaxError).
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            ))

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            ))

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # The class embedding is the (noise-augmented) image embedding + its noise level.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    """GPU integration tests for StableUnCLIPPipeline.

    Renamed from ``A``, which collided with the fast-test class above and
    shadowed it; locals restored from throwaway names; ``torch.floataa``
    replaced with the existing ``torch.float16``.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offload(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 15 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__magic_name__ : Optional[int] = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''

    # Placeholder fast-test class: intentionally empty (no fast tests defined
    # for this pipeline in this file).
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    """GPU integration test for the Versatile Diffusion image-variation pipeline.

    Renamed from a placeholder that collided with (and shadowed) the empty
    fast-test class above; locals restored — the original bound every result to
    a throwaway name and then referenced the undefined ``image_slice``.
    """

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 672 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds tiny LED configs/inputs and checks decoder past-key-values behaviour.

    Fixes: the class was given a placeholder name while the test suite below
    instantiates ``TFLEDModelTester``; ``__init__`` declared every parameter
    with the same name (a SyntaxError); ``tf.inta`` replaced with the existing
    ``tf.int8``; method names restored to match their call sites.
    """

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # Mark only the final (EOS) token as globally attended.
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"][:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build the kwargs dict for a TF-LED forward pass, deriving any mask not provided.

    Name restored from the call sites in this file; the mangled signature used one
    duplicated parameter name, which is a SyntaxError in Python.
    """
    if attention_mask is None:
        # attend to every non-padding token
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # first decoder position is always attended; the rest follows padding
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF-LED (config, past-key-values, attention outputs).

    The mangled version listed the same base name twice (TypeError: duplicate base
    class) and assigned every class attribute to the same identifier; restored per
    the transformers test-suite layout. NOTE(review): the mixin base names and
    LEDConfig are not visible in this chunk — confirm against the file's imports.
    """

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True  # grounded: self.is_encoder_decoder is read below
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        # The mangled code bound both objects to a local, so self.model_tester /
        # self.config_tester (read below) were never set.
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        # mark the first `num_global_attn_indices` positions as global attention
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in an int32 TF constant.

    Name restored from the call sites below; the mangled dtype `tf.intaa`
    does not exist — `tf.int32` is the integer dtype these tests use.
    """
    return tf.constant(tok_lst, dtype=tf.int32)
# Numerical tolerance for the slow integration checks below; the mangled name
# `__A` presumably corresponds to `TOLERANCE` upstream — TODO confirm.
__A : Optional[int] = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained allenai/led-base-16384 checkpoint.

    Renamed: the mangled class reused the name of the model test class above,
    shadowing it so those tests never ran.
    """

    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    """Shared assertions for a text-backed Dataset: 4 rows, a single "text" column,
    and the expected feature dtypes.

    Name restored from the call sites below; the mangled signature repeated the
    same parameter name, which is a SyntaxError.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Reading a text file must respect keep_in_memory (Arrow memory grows iff True)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    """Explicit `features` should override the default string dtype of the text column."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    """The requested split name is propagated to the resulting Dataset ("train" by default)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """The reader accepts either a single path or a list of paths."""
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a text-backed DatasetDict: each split has 4 rows and
    a single "text" column with the expected dtypes.

    Name and the `splits` parameter restored from the references in this file.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Reading a {"train": path} mapping must respect keep_in_memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """Explicit `features` should override the default dtype for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """Split names in the path mapping become the splits of the resulting DatasetDict."""
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    """Synthetic y = a*x + b (+ Gaussian noise) regression dataset backed by NumPy.

    Renamed: three classes in this file shared the mangled name `lowerCamelCase_`,
    so all but the last were shadowed. The mangled __init__ also repeated one
    parameter name (SyntaxError) and never stored attributes on `self`.
    """

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        # noisy linear targets
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    """Tiny two-parameter linear model; uses only the first element of each parameter.

    Renamed to undo the duplicate-class-name shadowing; the mangled version never
    stored `self.a`/`self.b`/`self.first_batch` and named the forward method
    `lowerCAmelCase_`, breaking nn.Module's __call__ dispatch.
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            # one-time dtype diagnostic
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]

    # Backward-compatible alias for the previous (mangled) method name.
    lowerCAmelCase_ = forward
class RegressionModel(torch.nn.Module):
    """Scalar linear model y = a*x + b with learnable a, b.

    Renamed to undo the duplicate-class-name shadowing; attribute binding and the
    forward method name are restored (the mangled version broke both).
    """

    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            # one-time dtype diagnostic
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b

    # Backward-compatible alias for the previous (mangled) method name.
    lowerCAmelCase_ = forward
def __SCREAMING_SNAKE_CASE(accelerator, batch_size=16):
    """Build MRPC train/eval dataloaders for the given accelerator.

    The first parameter name is grounded by the `accelerator.distributed_type`
    reference in `collate_fn`; the mangled signature duplicated one name
    (SyntaxError) and the locals never bound the names used below.
    NOTE(review): the train/eval batch sizes are hard-coded to 2 and 1 here, so
    `batch_size` is currently unused — kept for signature compatibility.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets = load_dataset("""csv""", data_files=data_files)
    label_list = datasets["""train"""].unique("""label""")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None, padding="""max_length""")
        if "label" in examples:
            outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""sentence1""", """sentence2""", """label"""],)

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
"""Lazy import structure for the Swinv2 model module."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# `_import_structure` is the name _LazyModule is handed below; the mangled
# version assigned this dict (and the modeling list) to throwaway names.
_import_structure = {
    """configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_swinv2"""] = [
        """SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Swinv2ForImageClassification""",
        """Swinv2ForMaskedImageModeling""",
        """Swinv2Model""",
        """Swinv2PreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Module/class names follow the "Swinv2" spelling used in the strings above;
    # the mangled "swinva"/"Swinva" spellings pointed at nonexistent modules.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite PyTorch-style "name.N" segments as "name_N" in a state-dict key.

    Name restored from the call in the converter below; the mangled version
    never bound `pats` and passed the key as its own pattern.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Map a PyTorch parameter (tuple key + tensor) to its Flax equivalent.

    Handles norm scale/bias, embeddings, conv kernels (NCHW -> HWIO transpose),
    linear kernels (transpose), and legacy gamma/beta layer-norm names.
    Name restored from the call in the converter below.
    """
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def __a(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a (nested) Flax params dict.

    The mangled locals never bound the names referenced below
    (`random_flax_state_dict`, `pt_state_dict`, `flax_state_dict`).
    """
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''')
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
'''simple docstring'''
import math
def fx(x, a):
    """f(x) = x^2 - a; the root of f is sqrt(a).

    Name restored from the Newton iteration below; the mangled signature
    duplicated one parameter name (SyntaxError).
    """
    return math.pow(x, 2) - a
def fx_derivative(x):
    """Derivative of f(x) = x^2 - a with respect to x, i.e. 2x.

    Name restored from the Newton iteration below.
    """
    return 2 * x
def get_initial_point(a):
    """Return a starting guess >= sqrt(a) by repeatedly squaring 2.0.

    Name restored from the Newton iteration below; the mangled body assigned
    a throwaway name while the loop read `start`.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def snake_case_(a, max_iter=99_99, tolerance=0.00000000000001):
    """Square root of `a` via Newton–Raphson on f(x) = x^2 - a.

    Raises ValueError for negative input ("math domain error"). Iterates at most
    `max_iter` times, stopping early once successive estimates differ by less
    than `tolerance`. The mangled signature duplicated parameter names and the
    body never bound `value`/`prev_value`.
    """
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        # Newton step: x <- x - f(x)/f'(x)
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
"""simple docstring"""
_a = {}
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_UpperCamelCase = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_UpperCamelCase = _calculate(days - 1, __snake_case, late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_UpperCamelCase = _calculate(days - 1, absent + 1, 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_UpperCamelCase = _calculate(days - 1, __snake_case, 0 )
_UpperCamelCase = state_late + state_absent + state_ontime
_UpperCamelCase = prizestrings
return prizestrings
def lowerCamelCase__ ( __snake_case = 30 ) -> int:
"""simple docstring"""
return _calculate(__snake_case, absent=0, late=0 )
if __name__ == "__main__":
print(solution())
| 19 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The mangled code assigned both the logger and the archive map to the same
# name, so the `logger.warning`/`logger.info` calls in the classes below would
# have hit a plain dict.
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of GIT.

    Class name restored from the `GitVisionConfig(**vision_config)` reference
    below; the mangled __init__ repeated one parameter name (SyntaxError) and
    never stored attributes on `self`.
    """

    model_type = """git_vision_model"""

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="""quick_gelu""",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the GIT (GenerativeImage2Text) model.

    Renamed to avoid reusing the mangled class name of GitVisionConfig above;
    attributes are now stored on `self` (the mangled body bound them to a local).
    """

    model_type = """git"""

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="""gelu""",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="""absolute""",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Probability of exactly `successes` successes in `trials` Bernoulli trials
    each with success probability `prob`.

    Name restored from the `binomial_distribution(2, 4, 0.75)` call below; the
    mangled signature duplicated parameter names (SyntaxError) and the
    isinstance check compared an argument with itself.

    Raises:
        ValueError: if successes > trials, either count is negative or not an
            int, or prob is outside the open interval (0, 1).
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    # Run doctests, then print a worked example.
    from doctest import testmod

    testmod()
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    print(binomial_distribution(2, 4, 0.75))
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
# Module-level logger; the obfuscated name `__magic_name__` is never referenced
# in the visible code — presumably `logger` upstream, TODO confirm.
__magic_name__ : Dict = logging.get_logger(__name__)
def shape_list(tensor):
    """Return the shape of `tensor` as a list, mixing static ints with dynamic
    scalar tensors for unknown dimensions.

    Name restored from the `shape_list(...)` call below; the mangled body never
    bound `dynamic` and passed the tensor itself to tf.TensorShape.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    # fully-unknown rank: only the dynamic shape tensor is usable
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    """Softmax with a tiny additive constant to avoid an XLA/CPU instability.

    Renamed per transformers' tf_utils to undo the chain of same-named defs
    shadowing one another in this file.
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Layer normalization over a single axis, expressed with batch_normalization.

    Only 1-D weight/bias and a single integer axis are supported. Renamed per
    transformers' tf_utils; the mangled locals never bound mean/variance.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """Replicate torch.flatten: collapse dims start_dim..end_dim (inclusive) into one.

    Renamed per transformers' tf_utils; the mangled locals never bound the
    shape variables referenced below.
    """
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask):
    """Turn a 2-D/3-D attention mask into a broadcastable additive bias
    (0 where attended, dtype.min where masked).

    Renamed per transformers' tf_utils; the mangled locals never bound the
    extended-mask variable referenced below.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids"):
    """Assert every id in `tensor` is < `embed_dim`, with a tokenization hint.

    Renamed per transformers' tf_utils; the mangled call compared the tensor
    against itself instead of against the embedding dimension.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '''
            f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ),
    )
def snake_case_ ( group , name , data ):
    """Save `data` (a list of byte/str attributes) as attribute `name` on HDF5
    `group`, splitting it into numbered chunks ``name0, name1, ...`` when the
    serialized array would exceed the 64 KB HDF5 object-header limit.

    Raises:
        RuntimeError: if any single item is itself larger than the limit, in
            which case no chunking can help.

    Fixes: the original signature repeated one parameter name (SyntaxError) and
    every local (`HDF5_OBJECT_HEADER_LIMIT`, `num_chunks`, `chunked_data`) plus
    the `group.attrs[...]` stores had been clobbered into `_snake_case`.
    """
    HDF5_OBJECT_HEADER_LIMIT = 6_45_12
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            f'''bytes: {bad_attributes}''' )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def snake_case_ ( group , name ):
    """Load string-list attribute `name` from HDF5 `group`, reassembling chunked
    attributes saved as ``name0, name1, ...`` and decoding bytes to str.

    Returns:
        list[str]: the decoded attribute values ([] if nothing is stored).

    Fixes: the original signature repeated one parameter name (SyntaxError) and
    the `data` accumulator had been clobbered into `_snake_case`.
    """
    if name in group.attrs:
        data = [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        # Walk numbered chunks until one is missing.
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
            chunk_id += 1
    return data
def snake_case_ ( data ):
    """Expand every rank-1 ``tf.Tensor`` in a (possibly nested) structure to
    rank 2 by appending a trailing axis; non-tensors pass through unchanged.

    Fixes: the original helper referenced an undefined name `t`.
    """
    def _expand_single_1d_tensor(t ):
        # Only touch true 1D TF tensors; leave everything else as-is.
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
| 672 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds a tiny TrOCR decoder config plus random inputs for the tests below.

    Fixes: the original class repeated `__snake_case` for every parameter
    (a SyntaxError), collapsed all method names to `A__`, and was renamed away
    from `TrOCRStandaloneDecoderModelTester`, which the test class below
    instantiates by that name.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) with random ids."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels, ):
        """Check that cached decoding matches a full forward pass on a random slice."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        # Avoid pad ids so the (implicit) mask does not zero out positions.
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test (config, dict) shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standalone decoder test-suite for TrOCR.

    Fixes: the base classes were the undefined name `UpperCamelCase__`; the five
    class attributes were all bound to the same name `UpperCamelCase` (so only
    the last survived); test method names were collapsed to `A__`, hiding them
    from unittest discovery; and `setUp` referenced undefined `__snake_case`.
    """

    # Attribute names the mixin machinery looks up.
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        # is_training=False: these tests only run eval-mode forward passes.
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    def test_inputs_embeds(self):
        # Not applicable to the standalone decoder setup.
        pass

    def test_save_load_fast_init_from_base(self):
        # Not applicable to the standalone decoder setup.
        pass

    def test_save_load_fast_init_to_base(self):
        # Not applicable to the standalone decoder setup.
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("""The model doesn't support left padding""" )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 21 |
'''simple docstring'''
# Author attribution constant (annotation corrected: the value is a str, not an int).
__magic_name__ : str = """Alexander Joslin"""
import operator as op
from .stack import Stack
def snake_case_ ( equation ):
    '''Evaluate a fully parenthesized infix expression of single-digit integers
    with Dijkstra's two-stack algorithm.

    Only single-character digit tokens and the operators + - * / are handled;
    any other character (spaces, "(") is skipped, and ")" triggers a reduction.

    Fixes: the original bound both stacks to `_snake_case` and then referenced
    the undefined names `operand_stack`/`operator_stack`/`opr`/`num1`/`num2`.
    Plain lists replace the project Stack wrapper (push -> append, peek+pop ->
    pop) with identical LIFO behavior.
    '''
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = []
    operator_stack = []

    for token in equation:
        if token.isdigit():
            # RULE 1: operands go on the operand stack.
            operand_stack.append(int(token ) )
        elif token in operators:
            # RULE 2: operators go on the operator stack.
            operator_stack.append(token )
        elif token == ")":
            # RULE 4: pop one operator and two operands, apply, push the result.
            opr = operator_stack.pop()
            num1 = operand_stack.pop()
            num2 = operand_stack.pop()
            # num1 was pushed last, so it is the right-hand operand.
            operand_stack.append(operators[opr](num2 , num1 ) )
    # RULE 5: the final value sits on top of the operand stack.
    return operand_stack[-1]
if __name__ == "__main__":
    # Demo expression; expected result is 45.
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    # Fixes: the constant was bound to `__magic_name__` with an undefined `List`
    # annotation, and the print referenced the nonexistent name
    # `dijkstras_two_stack_algorithm` instead of the evaluator defined above.
    print(F'{equation} = {snake_case_(equation)}')
| 672 | 0 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def snake_case_ (masked_input , model , tokenizer , topk=5 ):
    '''Return the top-k fillings for the single ``<mask>`` token in `masked_input`.

    Args:
        masked_input: text containing exactly one ``<mask>`` placeholder.
        model: a masked-LM model returning logits as the first output.
        tokenizer: matching tokenizer exposing mask_token / mask_token_id.
        topk: number of candidates to return.

    Returns:
        list of (filled_text, probability, predicted_token) tuples.

    Fixes: the original signature repeated `UpperCamelCase` four times
    (a SyntaxError) and every local was clobbered into `_a`.
    '''
    assert masked_input.count('''<mask>''' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        # \u2581 is SentencePiece's word-boundary marker; map it back to a space.
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
# Fixes: tokenizer/model/masked_input were all bound to the single name
# `_snake_case` (with undefined `Optional[Any]` annotations), and the final
# call targeted the nonexistent name `fill_mask` instead of snake_case_.
tokenizer = CamembertTokenizer.from_pretrained('camembert-base')
model = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()

masked_input = 'Le camembert est <mask> :)'
print(snake_case_(masked_input, model, tokenizer, topk=3))
| 22 |
'''simple docstring'''
from torch import nn
def snake_case_ ( act_fn ):
    '''Map an activation-function name to a fresh torch.nn module instance.

    Supported names: "swish"/"silu" -> nn.SiLU, "mish" -> nn.Mish,
    "gelu" -> nn.GELU.

    Raises:
        ValueError: for any unsupported name.

    Fixes: the original body compared the undefined name `act_fn` against the
    obfuscated parameter.
    '''
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 672 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
# Module-level logger. NOTE(review): the `str` annotation is inaccurate —
# get_logger returns a logger object; confirm the intended annotation.
snake_case__ : str = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _a ( ChunkPipeline ):
    """Automatic (SAM-style) mask-generation pipeline.

    Points are sampled on a grid over (possibly cropped) views of the image,
    fed to the model in batches, and the resulting masks are filtered and
    deduplicated with NMS.

    Fixes: the decorator/base were the undefined name `UpperCAmelCase__`;
    `__call__`, `preprocess` and `_forward` repeated the parameter name
    `_UpperCAmelCase` (a SyntaxError); every method was named `_UpperCAmelCase`
    so later defs shadowed earlier ones and the ChunkPipeline hooks
    (`_sanitize_parameters`/`preprocess`/`_forward`/`postprocess`) could not be
    resolved.
    """

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , 'vision' )
        requires_backends(self , 'torch' )

        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )

        # Restrict usage to models registered for mask generation.
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )

    def _sanitize_parameters( self , **kwargs ):
        """Split **kwargs into (preprocess, forward, postprocess) parameter dicts."""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['points_per_batch'] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['points_per_crop'] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['crops_n_layers'] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['crop_overlap_ratio'] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['crop_n_points_downscale_factor'] = kwargs['crop_n_points_downscale_factor']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params['pred_iou_thresh'] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params['stability_score_offset'] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params['mask_threshold'] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params['stability_score_thresh'] = kwargs['stability_score_thresh']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['crops_nms_thresh'] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['output_rle_mask'] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['output_bboxes_mask'] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        """Generate masks for `image`; tunable kwargs are routed by _sanitize_parameters."""
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )

    def preprocess( self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        """Yield batched point prompts (plus shared image embeddings) for the model."""
        image = load_image(image )
        target_size = self.image_processor.size['longest_edge']
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors='pt' )

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    # Compute the (expensive) image embeddings once and share them
                    # across all point batches.
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
                'To return all points at once, set points_per_batch to None' )

        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward( self , model_inputs , pred_iou_thresh=0.8_8 , stability_score_thresh=0.9_5 , mask_threshold=0 , stability_score_offset=1 , ):
        """Run the model on one point batch and filter its masks."""
        input_boxes = model_inputs.pop('input_boxes' )
        is_last = model_inputs.pop('is_last' )
        original_sizes = model_inputs.pop('original_sizes' ).tolist()
        reshaped_input_sizes = model_inputs.pop('reshaped_input_sizes' ).tolist()

        model_outputs = self.model(**model_inputs )

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs['iou_scores']
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        """Merge all point-batch outputs and apply cross-crop NMS."""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores' ) )
            all_masks.extend(model_output.pop('masks' ) )
            all_boxes.append(model_output.pop('boxes' ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )

        # Preserve any extra per-batch outputs the model produced.
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask

        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 23 |
'''simple docstring'''
from __future__ import annotations

import time
from math import sqrt

# Fixes: HEURISTIC, grid, delta and the position alias were all bound to the
# single name `__magic_name__` (each rebinding shadowed the previous one) and
# annotated with undefined typing names; the classes below reference the real
# names restored here.

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Positions are (y, x) tuples.
TPosition = tuple[int, int]
class Node:
    '''A search-tree node: grid position, goal, cost-so-far (g), heuristic (h)
    and parent link.

    Fixes: the original __init__ repeated `lowerCamelCase` for all six
    parameters (a SyntaxError), the heuristic method lost its name
    (`calculate_heuristic` is called in __init__), and dx/dy/other were
    undefined. Renamed to `Node`, the name the AStar class instantiates.
    '''

    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored (y, x).
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic( self ):
        '''Manhattan distance when the module-level HEURISTIC == 1, else Euclidean.'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )

    def __lt__( self , other ):
        # Ordering by f-cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    '''Classic A* search over the module-level `grid` using `Node`.

    Fixes: every method repeated `lowerCamelCase` parameters (a SyntaxError)
    and locals were collapsed to `_snake_case`. Renamed to `AStar`, the name
    used by BidirectionalAStar and the __main__ block.
    '''

    def __init__( self , start , goal ):
        # start/goal are (y, x); Node takes (pos_x, pos_y, goal_x, goal_y, ...).
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search( self ):
        '''Return the path to the target, or [start.pos] if none is found.'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )

            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]

    def get_successors( self , parent ):
        '''Return in-bounds, non-obstacle neighbor nodes of `parent`.'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors

    def retrace_path( self , node ):
        '''Walk parent links back from `node` and return the path start -> node.'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    '''Bidirectional A*: two AStar searches advancing toward each other,
    meeting in the middle.

    Fixes: methods repeated `lowerCamelCase` parameters (a SyntaxError) and
    locals were collapsed to `_snake_case`. Renamed to `BidirectionalAStar`,
    the name used by the __main__ block.
    '''

    def __init__( self , start , goal ):
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False

    def search( self ):
        '''Advance both frontiers until they meet; return the joined path.'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )

            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )

            # Each direction aims for the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path( self , fwd_node , bwd_node ):
        '''Join the forward path with the reversed backward path (meeting node deduplicated).'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Fixes: every variable below was bound to `__magic_name__` with undefined
    # typing annotations, so init/goal/a_star/start_time/end_time/bd_* were
    # all undefined where referenced.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 672 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _UpperCamelCase (gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path )-> Any:
    '''Convert a TensorFlow GPT-2 checkpoint to a PyTorch model directory.

    Args:
        gpta_checkpoint_path: path to the TF checkpoint.
        gpta_config_file: JSON config path, or "" to use the default GPTaConfig.
        pytorch_dump_folder_path: output directory for weights + config.

    Fixes: the original signature repeated `_lowerCamelCase` three times
    (a SyntaxError) and all locals were collapsed to `__snake_case`.
    '''
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )

    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # Fixes: parser/args were bound to `UpperCAmelCase_` (with undefined
    # `Optional[Any]` annotation) and the final call targeted the nonexistent
    # `convert_gpta_checkpoint_to_pytorch` / `args.gpta_*` attribute names
    # (argparse exposes them as gpt2_*, matching the flags below).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--gpt2_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    args = parser.parse_args()
    _UpperCamelCase(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 24 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy import structure: submodule name -> public names it provides.
# Fixes: the structure dict, its torch-only extension, and the
# sys.modules[__name__] replacement were all collapsed into bindings of
# `__magic_name__` (with undefined typing annotations), so `_import_structure`
# was undefined at the _LazyModule call and lazy loading never activated.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
import mpmath # for roots of unity
import numpy as np
class _UpperCamelCase :
    '''Polynomial multiplication via the fast Fourier transform.

    On construction, multiplies `poly_a` by `poly_b` (coefficient lists, lowest
    degree first) and stores the product coefficients in ``self.product``.

    Fixes: __init__ repeated the parameter name `a` (a SyntaxError); the
    private methods lost their mangled names (`self.__multiply()` /
    `self.__dft(...)` are called but were not defined); `np.loga` does not
    exist (log2 intended); and the root of unity now uses the already-imported
    numpy (``e^{2*pi*i/N}``) instead of mpmath, which is numerically identical
    at double precision.
    '''

    def __init__( self , poly_a=None , poly_b=None ):
        # Input as coefficient lists, lowest degree first.
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )

        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )

        # A complex root used for the fourier transform: principal N-th root of unity.
        self.root = complex(np.exp(2j * np.pi / self.c_max_length ) )

        # The product
        self.product = self.__multiply()

    def __dft( self , which ):
        '''Iterative DFT of polyA ("A") or polyB (anything else).'''
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol )]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply( self ):
        '''Pointwise-multiply the two DFTs, then invert the transform.'''
        dft_a = self.__dft("A" )
        dft_b = self.__dft("B" )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__( self ):
        '''Human-readable dump of both inputs and their product.'''
        a = "A = " + " + ".join(
            F"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A] ) )
        b = "B = " + " + ".join(
            F"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B] ) )
        c = "A*B = " + " + ".join(
            F"{coef}*x^{i}" for coef, i in enumerate(self.product ) )

        return F"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
    # Fixes: stray "| 25 |" residue fused onto the testmod() line was a SyntaxError.
    import doctest

    doctest.testmod()
'''simple docstring'''
import string
def snake_case_ ( sequence ):
    '''Encode/decode `sequence` with the Atbash cipher via explicit ordinal
    arithmetic (slow reference implementation); non-letters pass through.

    Fixes: the original body referenced the undefined names `sequence`,
    `output` and `extract`.
    '''
    output = ""
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            # Uppercase: A..Z -> Z..A (65 + 90 == 155).
            output += chr(1_55 - extract )
        elif 97 <= extract <= 1_22:
            # Lowercase: a..z -> z..a (97 + 122 == 219).
            output += chr(2_19 - extract )
        else:
            output += i
    return output
def snake_case_ ( sequence ):
    '''Atbash cipher via lookup into a reversed alphabet (fast path);
    non-letters pass through unchanged.

    Fixes: the original body referenced the undefined name `sequence`.
    '''
    letters = string.ascii_letters
    # Reversed lowercase then reversed uppercase, aligned with ascii_letters.
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def snake_case_ ( ):
    '''Benchmark both Atbash implementations over string.printable.

    NOTE(review): the timeit setup imports `atbash` and `atbash_slow` from
    __main__; those names must exist in the executing script for the
    benchmark to run — confirm the intended public names of the defs above.

    Fixes: the f-strings passed `setup=SCREAMING_SNAKE_CASE__` (an undefined
    name) instead of the local setup string.
    '''
    from timeit import timeit

    print("Running performance benchmarks..." )
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds''' )
    print(f'''> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds''' )
if __name__ == "__main__":
    # Demo: print the Atbash encryption of sample strings, then benchmark.
    # NOTE(review): `atbash` and `benchmark` are not defined under these names
    # in this module as written (the defs above are all named `snake_case_`),
    # so this block raises NameError — confirm the intended public names.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F'{example} encrypted in atbash: {atbash(example)}')
    benchmark()
| 672 | 0 |
'''simple docstring'''
from math import factorial
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(_lowerCamelCase ) // (factorial(_lowerCamelCase ) * factorial(n - k ))
if __name__ == "__main__":
    # Fixes: the prints called the nonexistent name `combinations`; the
    # function defined above is `_a`.
    print(
        "The number of five-card hands possible from a standard",
        f"""fifty-two card deck is: {_a(52, 5)}\n""",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"""4 for group projects, there are {_a(40, 4)} ways""",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"""are {_a(10, 3)} ways that first, second and""",
        "third place can be awarded.",
    )
| 26 |
'''simple docstring'''
import numpy as np
def snake_case_ ( vector ):
    '''Logistic sigmoid, elementwise over a numpy array (or scalar).

    Fixes: the original body referenced the undefined name `vector`.
    '''
    return 1 / (1 + np.exp(-vector ))
def snake_case_ ( vector ):
    '''Sigmoid-approximated GELU: ``x * sigmoid(1.702 * x)``, elementwise.

    Fixes: the original body referenced the undefined names `vector` and
    `sigmoid`; the sigmoid is inlined so the function stands alone.
    '''
    return vector * (1 / (1 + np.exp(-1.702 * vector )))
if __name__ == "__main__":
    # Execute any doctest examples embedded in this module when run as a script.
    import doctest
    doctest.testmod()
| 672 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCamelCase:
    """Test helper that builds a FunnelConfig plus dummy tensors and runs per-head model checks.

    NOTE(review): this file looks machine-rewritten and cannot run as-is:
    every method parameter is literally named ``snake_case_`` (duplicate
    parameter names are a SyntaxError in Python), the bodies read the
    original parameter names (``parent``, ``batch_size``, ``config``, ...)
    which no longer exist, and every local is bound to ``_A`` so earlier
    values are clobbered.  The comments below describe the apparent intent
    only; the original identifiers must be restored before this can run.
    """
    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=[1, 1, 2] , snake_case_=1 , snake_case_=32 , snake_case_=4 , snake_case_=8 , snake_case_=37 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=512 , snake_case_=3 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , snake_case_=False , ):
        # Intended: record every hyper-parameter later used to build a FunnelConfig.
        _A = parent
        _A = batch_size
        _A = seq_length
        _A = is_training
        _A = use_input_mask
        _A = use_token_type_ids
        _A = use_labels
        _A = vocab_size
        _A = block_sizes
        _A = num_decoder_layers
        _A = d_model
        _A = n_head
        _A = d_head
        _A = d_inner
        _A = hidden_act
        _A = hidden_dropout
        _A = attention_dropout
        _A = activation_dropout
        _A = max_position_embeddings
        _A = type_vocab_size
        _A = 2
        _A = num_labels
        _A = num_choices
        _A = scope
        _A = initializer_std
        # Used in the tests to check the size of the first attention layer
        _A = n_head
        # Used in the tests to check the size of the first hidden state
        _A = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        _A = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            _A = self.num_hidden_layers + 2
    def lowerCAmelCase__ ( self ):
        # Intended: build random input ids / masks / labels plus the matching FunnelConfig.
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = None
        if self.use_input_mask:
            _A = random_attention_mask([self.batch_size, self.seq_length] )
        _A = None
        if self.use_token_type_ids:
            _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _A = None
        _A = None
        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _A = ids_tensor([self.batch_size] , self.num_choices )
        _A = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: check TFFunnelModel output shapes for dict, list and tensor inputs.
        _A = TFFunnelModel(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = model(snake_case_ )
        _A = [input_ids, input_mask]
        _A = model(snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        _A = False
        _A = TFFunnelModel(config=snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        _A = False
        _A = TFFunnelModel(config=snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: same checks for TFFunnelBaseModel (encoder only — pooled length 2 or 3).
        _A = TFFunnelBaseModel(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = model(snake_case_ )
        _A = [input_ids, input_mask]
        _A = model(snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        _A = False
        _A = TFFunnelBaseModel(config=snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        _A = False
        _A = TFFunnelBaseModel(config=snake_case_ )
        _A = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: pre-training head emits one logit per token.
        _A = TFFunnelForPreTraining(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: masked-LM head emits vocab-sized logits per token.
        _A = TFFunnelForMaskedLM(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: sequence-classification head emits one logit per label.
        _A = self.num_labels
        _A = TFFunnelForSequenceClassification(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: multiple-choice head — inputs tiled across the choice dimension.
        _A = self.num_choices
        _A = TFFunnelForMultipleChoice(config=snake_case_ )
        _A = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
        _A = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
        _A = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        _A = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: token-classification head emits label logits per token.
        _A = self.num_labels
        _A = TFFunnelForTokenClassification(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # Intended: QA head emits start/end logits per token.
        _A = TFFunnelForQuestionAnswering(config=snake_case_ )
        _A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _A = model(snake_case_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowerCAmelCase__ ( self ):
        # Intended: split prepare_config_and_inputs() into (config, inputs_dict).
        # NOTE(review): every unpacking target below is `_A`, so only the last
        # element survives, and the dict then reads `input_ids`/`token_type_ids`/
        # `input_mask`, which are undefined here — this would raise NameError.
        _A = self.prepare_config_and_inputs()
        (
            (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ), (
                _A
            ),
        ) = config_and_inputs
        _A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase( __snake_case , __snake_case , unittest.TestCase ):
    """Test suite for the full (encoder+decoder) TF Funnel models.

    NOTE(review): both base mixins are the undefined placeholder
    ``__snake_case`` (originally ``TFModelTesterMixin`` and
    ``PipelineTesterMixin``); all four class attributes below share the name
    ``__magic_name__`` so each assignment overwrites the previous one; and
    ``TFFunnelModelTester`` / ``snake_case_`` referenced in ``setUp`` are
    undefined here.  Restore the original identifiers before running.
    """
    __magic_name__ = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __magic_name__ = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False
    def lowerCAmelCase__ ( self ):
        # Intended: build the model tester and a ConfigTester for FunnelConfig.
        _A = TFFunnelModelTester(self )
        _A = ConfigTester(self , config_class=snake_case_ )
    def lowerCAmelCase__ ( self ):
        # Runs the shared config sanity checks.
        self.config_tester.run_common_tests()
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*snake_case_ )
@require_tf
class lowerCamelCase( __snake_case , unittest.TestCase ):
    """Test suite for the base (encoder-only) TF Funnel models.

    NOTE(review): the base mixin is the undefined placeholder ``__snake_case``;
    the two class attributes share the name ``__magic_name__`` so the second
    overwrites the first; ``TFFunnelModelTester`` / ``snake_case_`` in
    ``setUp`` are undefined here; and this class re-binds the module name
    ``lowerCamelCase``, shadowing the tester class above.
    """
    __magic_name__ = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    __magic_name__ = False
    __magic_name__ = False
    def lowerCAmelCase__ ( self ):
        # Intended: base=True makes the tester target the encoder-only model.
        _A = TFFunnelModelTester(self , base=snake_case_ )
        _A = ConfigTester(self , config_class=snake_case_ )
    def lowerCAmelCase__ ( self ):
        self.config_tester.run_common_tests()
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
    def lowerCAmelCase__ ( self ):
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
| 27 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
        },
    ] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Single-node SageMaker training test: launches a HuggingFace estimator and
    asserts the reported runtime/accuracy/loss KPIs against the parameterized
    expectations.  Only runs when the TEST_SAGEMAKER env var is "True".

    NOTE(review): this file looks machine-rewritten — the ``check=lowerCamelCase``
    and ``debugger_hook_config=lowerCamelCase`` keyword values below reference a
    module-level name rather than the original literals (presumably True/False);
    confirm before relying on this test.
    """
    def UpperCamelCase( self ):
        # setUp: copy the example training script into the test workspace (PyTorch only).
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=lowerCamelCase , )
        assert hasattr(self , "env" )
    def UpperCamelCase( self , lowerCamelCase=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
    def UpperCamelCase( self , lowerCamelCase ):
        # Export the job's CloudWatch metrics to a CSV next to the test artifacts.
        # NOTE(review): the f-string reads `job_name`, which is not defined in
        # this scope (the parameter was renamed to `lowerCamelCase`).
        TrainingJobAnalytics(lowerCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
    def UpperCamelCase( self ):
        # create estimator
        _snake_case = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        _snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _snake_case = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowerCamelCase )
| 672 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files' ,[
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ] ,)
def lowercase__( files ,tmp_path_factory ):
    """Check that DatasetInfosDict.from_directory reads dataset info from
    README.md and/or the legacy dataset_infos.json, whichever is present.

    The original signature declared two parameters both named
    ``__UpperCamelCase`` (a SyntaxError) while the body read ``files`` and
    ``tmp_path_factory``; the parameters are restored to the names pytest
    injects (``files`` from parametrize, ``tmp_path_factory`` as a fixture).
    """
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md' ,'w' ) as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md' ,'w' ) as f:
            f.write('' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json' ,'w' ) as f:
            f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info' ,[
        DatasetInfo(),
        DatasetInfo(
            description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,),
    ] ,)
def lowercase__( tmp_path ,dataset_info ):
    """Round-trip a DatasetInfo through write_to_directory / from_directory.

    The original signature declared two parameters both named
    ``__UpperCamelCase`` (a SyntaxError) while the body read ``dataset_info``;
    the parameters are restored to pytest's injected names (``tmp_path``
    fixture, ``dataset_info`` from parametrize).
    """
    tmp_path_str = str(tmp_path )
    dataset_info.write_to_directory(tmp_path_str )
    reloaded = DatasetInfo.from_directory(tmp_path_str )
    assert dataset_info == reloaded
    # write_to_directory must have produced the canonical JSON file.
    assert os.path.exists(os.path.join(tmp_path_str ,'dataset_info.json' ) )
def lowercase__( ):
    """A fully-populated DatasetInfo exports exactly the YAML-exportable fields
    and survives a yaml dump/load round trip.

    The original body read the undefined name ``__UpperCamelCase`` in a
    zero-argument function (NameError); the intermediate results are restored
    to named locals so each step feeds the next.
    """
    dataset_info = DatasetInfo(
        description='foo' ,citation='bar' ,homepage='https://foo.bar' ,license='CC0' ,features=Features({'a': Value('int32' )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train', 'num_examples': 42}] ,download_checksums={} ,download_size=13_37 ,post_processing_size=4_42 ,dataset_size=12_34 ,size_in_bytes=13_37 + 4_42 + 12_34 ,)
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    # Only (and all of) the declared YAML fields may appear.
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def lowercase__( ):
    """An empty DatasetInfo serializes to an empty YAML dict.

    The original bound both results to the placeholder ``SCREAMING_SNAKE_CASE``
    while reading the undefined names ``dataset_info`` and
    ``dataset_info_yaml_dict`` (NameError); proper locals are restored.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict' ,[
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,)
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42 ),
                'v2': DatasetInfo(dataset_size=13_37 ),
            } ),
    ] ,)
def lowercase__( tmp_path ,dataset_infos_dict ):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory.

    The original signature declared two parameters both named
    ``__UpperCamelCase`` (a SyntaxError) while the body read
    ``dataset_infos_dict``; the parameters are restored to pytest's injected
    names.  The loop's mangled placeholder assignments are restored to the
    attribute/item writes the comments below describe.
    """
    tmp_path_str = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path_str )
    reloaded = DatasetInfosDict.from_directory(tmp_path_str )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path_str ,'README.md' ) )
| 28 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    """DistilBERT tokenizer tests, reusing the BERT tokenization suite.

    NOTE(review): this file looks machine-rewritten — the base class
    placeholder ``__UpperCamelCase`` is undefined (originally
    ``BertTokenizationTest``); the three class attributes all share the name
    ``UpperCAmelCase__`` so only the last survives; and in the method below
    every local is bound to ``_snake_case`` while later lines read
    ``tokenizer``/``text``/``text_a``, and ``add_special_tokens=lowerCamelCase``
    passes a module-level name instead of the original literal.  Restore the
    original identifiers before running.
    """
    UpperCAmelCase__ : int = DistilBertTokenizer
    UpperCAmelCase__ : Union[str, Any] = DistilBertTokenizerFast
    UpperCAmelCase__ : List[str] = True
    @slow
    def UpperCamelCase( self ):
        # Intended: verify [CLS]/[SEP] placement for single sentences and pairs.
        _snake_case = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        _snake_case = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 672 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __lowerCamelCase :
    """Fixed-capacity circular queue backed by a doubly-linked ring of nodes.

    NOTE(review): heavy machine renaming broke this class: ``Node()`` is
    referenced but the node class below was renamed to ``__lowerCamelCase``
    (the same name as this class, which it also shadows at module level); all
    seven methods share the name ``UpperCAmelCase__`` so only the last one
    survives on the class; and the many ``lowerCamelCase_ = ...`` lines bind a
    throwaway local where attribute writes (``self.front``, ``current_node.data``
    etc.) were clearly intended.  Restore the original identifiers before use.
    """
    def __init__( self , UpperCAmelCase = 6 ):
        # Intended: initialize empty front/rear pointers, then build the ring.
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        self.create_linked_list(UpperCAmelCase )
    def UpperCAmelCase__ ( self , UpperCAmelCase ):
        # Intended: allocate `UpperCAmelCase` nodes and link them into a circular
        # doubly-linked list, pointing front and rear at the first node.
        lowerCamelCase_ = Node()
        lowerCamelCase_ = current_node
        lowerCamelCase_ = current_node
        lowerCamelCase_ = current_node
        for _ in range(1 , UpperCAmelCase ):
            lowerCamelCase_ = Node()
            lowerCamelCase_ = current_node
            lowerCamelCase_ = previous_node
            lowerCamelCase_ = current_node
        lowerCamelCase_ = self.front
        lowerCamelCase_ = previous_node
    def UpperCAmelCase__ ( self ):
        # Empty when front and rear coincide on a node holding no data.
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def UpperCAmelCase__ ( self ):
        # Peek at the front element (raises via the guard when empty).
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def UpperCAmelCase__ ( self , UpperCAmelCase ):
        # Intended: enqueue — advance rear (when non-empty) and store the data.
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            lowerCamelCase_ = self.rear.next
        if self.rear:
            lowerCamelCase_ = data
    def UpperCAmelCase__ ( self ):
        # Intended: dequeue — return the front element, clearing its slot and
        # advancing the front pointer.
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            lowerCamelCase_ = self.front.data
            lowerCamelCase_ = None
            return data
        lowerCamelCase_ = self.front
        lowerCamelCase_ = old_front.next
        lowerCamelCase_ = old_front.data
        lowerCamelCase_ = None
        return data
    def UpperCAmelCase__ ( self ):
        # Guard shared by peek/dequeue: queue must not be empty.
        if self.is_empty():
            raise Exception('''Empty Queue''' )
    def UpperCAmelCase__ ( self ):
        # Guard for enqueue: the next slot after rear must not be the front.
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''' )
class __lowerCamelCase :
    """Linked-list node holding `data` plus next/prev links.

    NOTE(review): this was originally the ``Node`` class the queue above
    instantiates; the rename gives it the same name as the queue class (which
    it now shadows), and the three assignments bind a local instead of the
    intended ``self.data`` / ``self.next`` / ``self.prev`` attributes.
    """
    def __init__( self ):
        lowerCamelCase_ = None
        lowerCamelCase_ = None
        lowerCamelCase_ = None
if __name__ == "__main__":
    # Execute the module's doctests when run as a script.
    import doctest
    doctest.testmod()
| 29 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import plumbing for the SwiftFormer sub-package: names are only
# imported when first accessed, and torch-backed symbols are registered only
# when torch is installed.
# NOTE(review): the import-structure dict and the _LazyModule instance were
# originally distinct names (`_import_structure` and the module object); here
# both are `__magic_name__`, and the `_LazyModule(...)` call below still
# references `_import_structure`, which is undefined in this chunk.
__magic_name__ : Optional[int] = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: also expose the model classes.
    __magic_name__ : Optional[int] = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    __magic_name__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
from collections.abc import Iterable
from typing import Generic, TypeVar
__a = TypeVar('_T')
class __a( Generic[_T] ):
    """FIFO queue intended to be implemented with two LIFO stacks.

    NOTE(review): machine renaming collapsed the two stacks into one name —
    ``self._stacka`` stands in for both the in-stack and the out-stack, the
    two ``__init__`` assignments bind a local ``UpperCAmelCase_`` (discarding
    the initial contents, and ``iterable`` is undefined since the parameter
    became ``_SCREAMING_SNAKE_CASE``), both ``a__`` methods share one name so
    the enqueue method is shadowed, and ``stacka_pop``/``stacka_append`` in
    the dequeue path are undefined.  Restore the original
    ``_stack1``/``_stack2`` and ``put``/``get`` identifiers before use.
    """
    def __init__( self ,_SCREAMING_SNAKE_CASE = None ) -> None:
        # Intended: seed the in-stack from the optional iterable; out-stack empty.
        UpperCAmelCase_ : list[_T] = list(iterable or [] )
        UpperCAmelCase_ : list[_T] = []
    def __len__( self ) -> int:
        # Total elements across both stacks.
        return len(self._stacka ) + len(self._stacka )
    def __repr__( self ) -> str:
        # Shows elements in queue (FIFO) order.
        return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
    def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> None:
        # Intended: enqueue — push onto the in-stack.
        self._stacka.append(_SCREAMING_SNAKE_CASE )
    def a__ ( self ) -> _T:
        # Intended: dequeue — refill the out-stack from the in-stack when
        # empty, then pop the out-stack.
        UpperCAmelCase_ : str = self._stacka.pop
        UpperCAmelCase_ : List[str] = self._stacka.append
        if not self._stacka:
            while self._stacka:
                stacka_append(stacka_pop() )
        if not self._stacka:
            raise IndexError('''Queue is empty''' )
        return self._stacka.pop()
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Emit INFO-level logs during checkpoint conversion.
logging.set_verbosity_info()
# Module-level logger for this conversion script.
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
    """Build the (old_name, new_name) pairs that map original BEiT/DiT checkpoint
    keys to HuggingFace BEiT parameter names.

    NOTE(review): this file looks machine-rewritten — the three parameters are
    all named ``SCREAMING_SNAKE_CASE__`` (duplicate parameter names are a
    SyntaxError) while the body reads the original names ``config``,
    ``has_lm_head`` and ``is_semantic``; those identifiers must be restored
    before this can run.
    """
    _snake_case = "backbone." if is_semantic else ""
    _snake_case = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f'''{prefix}cls_token''', "beit.embeddings.cls_token"),
            (f'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
            (f'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
            (f'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
        ] )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
    """Split each layer's fused qkv projection from the original checkpoint into
    separate query/key/value weights (and q/v biases) under HuggingFace names.

    NOTE(review): machine-rewritten and non-functional as-is — the parameters
    are all named ``SCREAMING_SNAKE_CASE__`` (duplicate parameter names are a
    SyntaxError) while the body reads ``config``/``state_dict``/``prefix``, and
    every destination below is bound to the local ``_snake_case`` where writes
    into ``state_dict[...]`` (the per-layer query/key/value keys) were clearly
    intended, so the extracted slices are discarded.
    """
    for i in range(config.num_hidden_layers ):
        _snake_case = "backbone." if is_semantic else ""
        # queries, keys and values
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
        # First hidden_size rows of the fused matrix are the query weights.
        _snake_case = in_proj_weight[
            : config.hidden_size, :
        ]
        _snake_case = q_bias
        # Middle hidden_size rows are the key weights (keys have no bias).
        _snake_case = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        # Last hidden_size rows are the value weights.
        _snake_case = in_proj_weight[
            -config.hidden_size :, :
        ]
        _snake_case = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        _snake_case = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        _snake_case = gamma_a
        _snake_case = gamma_a
def snake_case_ ( dct , old , new ):
    """Rename a dictionary key in place: move ``dct[old]`` to ``dct[new]``.

    The original signature declared three parameters all named
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError) and discarded the popped value
    into a throwaway local instead of storing it under the new key.
    """
    val = dct.pop(old )
    dct[new] = val
def snake_case_ ( ):
    """Download and return the standard COCO validation image used as a smoke-test
    input (image 000000039769).

    The original passed the undefined name ``SCREAMING_SNAKE_CASE__`` to
    ``requests.get`` and returned the undefined ``im``; proper locals are
    restored (stream=True lets PIL read straight from the response body).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
    """Convert an original DiT checkpoint (URL) into a HuggingFace BEiT model,
    save model + image processor to a folder, and optionally push to the Hub.

    NOTE(review): machine-rewritten and non-functional as-is — the three
    parameters are all named ``SCREAMING_SNAKE_CASE__`` (duplicate parameter
    names are a SyntaxError) while the body reads ``checkpoint_url``,
    ``pytorch_dump_folder_path`` and ``push_to_hub``; the many remaining
    ``SCREAMING_SNAKE_CASE__`` references stand in for several different
    original values and cannot be reconstructed unambiguously from this chunk.
    """
    # rvlcdip checkpoints are fine-tuned classifiers; the others keep the LM head.
    _snake_case = False if "rvlcdip" in checkpoint_url else True
    _snake_case = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        _snake_case = 10_24
        _snake_case = 40_96
        _snake_case = 24
        _snake_case = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        _snake_case = 16
        _snake_case = "huggingface/label-files"
        _snake_case = "rvlcdip-id2label.json"
        _snake_case = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="dataset" ) , "r" ) )
        _snake_case = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
        _snake_case = idalabel
        _snake_case = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    _snake_case = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="cpu" )["model"]
    _snake_case = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
    # load HuggingFace model
    _snake_case = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ )
    model.eval()
    model.load_state_dict(SCREAMING_SNAKE_CASE__ )
    # Check outputs on an image
    _snake_case = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ )
    _snake_case = prepare_img()
    _snake_case = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
    _snake_case = encoding["pixel_values"]
    _snake_case = model(SCREAMING_SNAKE_CASE__ )
    _snake_case = outputs.logits
    # verify logits
    _snake_case = [1, 16] if "rvlcdip" in checkpoint_url else [1, 1_96, 81_92]
    assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected"
    Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(SCREAMING_SNAKE_CASE__ )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
    if push_to_hub:
        if has_lm_head:
            _snake_case = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            _snake_case = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
        model.push_to_hub(
            repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL / output folder / push flag and
    # run the conversion.
    # NOTE(review): `convert_dit_checkpoint` is undefined in this chunk — the
    # conversion function above was renamed to `snake_case_`.
    __magic_name__ : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    __magic_name__ : Dict = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 672 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
# Module logger.
# NOTE(review): the archive-map dict below re-binds the very same name as the
# logger (both were renamed to `lowerCamelCase__`), so the logger object is
# immediately lost.
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
# Map of pretrained Longformer checkpoint ids to their hosted config files.
lowerCamelCase__ : Dict = {
    'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
    'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
    'allenai/longformer-large-4096-finetuned-triviaqa': (
        'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
    ),
    'allenai/longformer-base-4096-extra.pos.embd.only': (
        'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
    ),
    'allenai/longformer-large-4096-extra.pos.embd.only': (
        'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
    ),
}
class LongformerConfig(PretrainedConfig):
    """Configuration for a Longformer model.

    Fixes applied: the constructor previously declared every parameter with
    the same duplicated name (a SyntaxError) and bound the values to a
    throwaway local instead of ``self`` attributes. Parameter names are
    recovered from the right-hand sides of those assignments; the base class
    is the otherwise-unused ``PretrainedConfig`` import above.
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        # pad_token_id is consumed explicitly by the base config; the rest of
        # the special-token ids are stored as plain attributes below.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer.

    Fixes applied: duplicate parameter names (SyntaxError), all methods
    sharing one mangled name (only the last survived), and locals bound to a
    throwaway name while later lines read ``dynamic_axis`` / ``outputs`` /
    ``inputs``. Method names are recovered from the ``super().outputs`` /
    ``super().default_onnx_opset`` / ``super().generate_dummy_inputs`` calls.
    """

    def __init__(
        self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None
    ):
        super().__init__(config, task, patching_specs)
        # Flip the model config into its ONNX-export-friendly code paths
        # (LongformerConfig exposes an `onnx_export` attribute).
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph's input tensors."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            # NOTE(review): the mangled source lost the assignment target
            # here; restoring the conventional pooler_output axis entry —
            # confirm against upstream.
            outputs["pooler_output"] = {0: 'batch'}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported-model outputs.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs and add Longformer's global attention mask."""
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs['input_ids'])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True iff *number* is prime, by trial division over odd factors.

    Fix: both functions in this file were named identically (the second
    shadowed the first) while their bodies call ``is_prime`` /
    ``next_prime``; the names and the ``odd_numbers`` local are restored
    from those call sites.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    # Only odd candidates up to sqrt(number) need checking.
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the first prime at or after ``factor * value``.

    With ``desc=True`` in kwargs the search walks downward instead of up.
    """
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        # Wrapped around to the start value: restart one above it.
        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
| 672 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Fixture tuples for the __main__ demo below (unsorted, mixed-sign ints).
# Fix: both tuples were bound to the same name, clobbering the first, while
# the demo reads `test_data_odd` / `test_data_even`.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """Singly-linked node: one int payload plus a link to the next node.

    Fix: both fields were named `__A` (the second clobbered the first); the
    names `data` / `next_node` are recovered from the iteration code that
    reads `node.data` and `node.next_node`.
    """

    data: int
    next_node: Node | None
class SortedLinkedList:
    """Singly linked list that stores its ints in ascending order.

    Fixes: class name restored from the ``SortedLinkedList`` reference in
    ``__main__``; ``sorted`` was passed the input iterable as its ``reverse``
    flag — restored to ``reverse=True`` (descending head-inserts yield an
    ascending list).
    """

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert largest-first at the head so iteration is ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored ints in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list.

    Name restored from the ``merge_lists`` call in ``__main__``.
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the alias was bound to a throwaway name while the print below
    # reads `SSL`.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: maps submodule name -> exported symbols, consumed by
# _LazyModule below so heavy dependencies (torch, vision) load on first use.
# Fixes: the dict and its optional extensions were bound to a throwaway name
# while _LazyModule is passed `_import_structure`; the TYPE_CHECKING imports
# referenced mangled module/symbol names ("pixastruct"/"PixaStruct") that do
# not match the string keys declared in the structure itself.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
    """Model tester for Mask2Former: builds a tiny config plus random pixel
    inputs/labels and checks output shapes and presence of hidden states.

    NOTE(review): this file appears machine-mangled. Every ``__init__``
    parameter is named ``_a`` (duplicate argument names are a SyntaxError),
    all methods share the single name ``SCREAMING_SNAKE_CASE__`` (so only
    the last definition survives), and assignment targets were collapsed to
    the throwaway local ``snake_case__`` even though later code reads
    ``self.batch_size``, ``config``, ``model`` etc. The intended names can
    be recovered from those read sites; the code is left untouched here.
    """

    def __init__( self:int , _a:Optional[Any] , _a:Any=2 , _a:Dict=True , _a:List[Any]=False , _a:List[str]=10 , _a:Union[str, Any]=3 , _a:Tuple=32 * 8 , _a:Dict=32 * 8 , _a:List[str]=4 , _a:Union[str, Any]=64 , ):
        # NOTE(review): these should be `self.parent = parent`,
        # `self.batch_size = batch_size`, ... (RHS names reveal the intent).
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = is_training
        snake_case__ = use_auxiliary_loss
        snake_case__ = num_queries
        snake_case__ = num_channels
        snake_case__ = min_size
        snake_case__ = max_size
        snake_case__ = num_labels
        snake_case__ = hidden_dim
        # Presumably the mask feature size, also set to hidden_dim — confirm.
        snake_case__ = hidden_dim

    # Builds (config, pixel_values, pixel_mask, mask_labels, class_labels).
    # NOTE(review): `_a` below is undefined here — originally torch_device.
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            _a )
        snake_case__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
        # Random binary masks, one channel per label.
        snake_case__ = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
        ).float()
        snake_case__ = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
        snake_case__ = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    # NOTE(review): the bare assignments below originally targeted attributes
    # of the freshly-built config (num_queries, num_labels, backbone depths,
    # feed-forward dims, hidden/mask feature sizes) — targets lost in mangling.
    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        snake_case__ = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        snake_case__ = self.num_queries
        snake_case__ = self.num_labels
        snake_case__ = [1, 1, 1, 1]
        snake_case__ = self.num_channels
        snake_case__ = 64
        snake_case__ = 1_28
        snake_case__ = self.hidden_dim
        snake_case__ = self.hidden_dim
        snake_case__ = self.hidden_dim
        return config

    # prepare_config_and_inputs_for_common: (config, {"pixel_values", "pixel_mask"}).
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.prepare_config_and_inputs()
        snake_case__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    # check_output_hidden_state: verifies hidden-state list lengths match config.
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:str ):
        snake_case__ = output.encoder_hidden_states
        snake_case__ = output.pixel_decoder_hidden_states
        snake_case__ = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(_a ) , config.decoder_layers )

    # create_and_check_maskaformer_model: runs the base model and checks shapes.
    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Optional[Any] , _a:Optional[int] , _a:List[str] , _a:Tuple=False ):
        with torch.no_grad():
            snake_case__ = MaskaFormerModel(config=_a )
            model.to(_a )
            model.eval()
            snake_case__ = model(pixel_values=_a , pixel_mask=_a )
            snake_case__ = model(_a , output_hidden_states=_a )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(_a , _a )

    # create_and_check_maskaformer_instance_segmentation_head_model.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str , _a:Dict , _a:Optional[Any] , _a:str , _a:str ):
        snake_case__ = MaskaFormerForUniversalSegmentation(config=_a )
        model.to(_a )
        model.eval()

        def comm_check_on_output(_a:Any ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            snake_case__ = model(pixel_values=_a , pixel_mask=_a )
            snake_case__ = model(_a )
        comm_check_on_output(_a )
        snake_case__ = model(
            pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
        comm_check_on_output(_a )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
    """Common model tests for Mask2Former.

    NOTE(review): mangled file — every test method shares the single name
    ``SCREAMING_SNAKE_CASE__`` (only the last survives test discovery), the
    two ``snake_case_`` base classes are undefined here (presumably the
    otherwise-unused ``ModelTesterMixin`` / ``PipelineTesterMixin`` imports),
    and results are bound to the throwaway local ``snake_case__`` while
    later lines read ``self.model_tester``, ``model``, ``outputs`` etc.
    Code is left untouched; restore distinct ``test_*`` names before use.
    """

    # Class-level test configuration flags (all-model tuple, pipeline map,
    # and four booleans — likely test_pruning / head-masking style switches).
    __lowercase : Optional[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    __lowercase : int = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
    __lowercase : List[Any] = False
    __lowercase : str = False
    __lowercase : Union[str, Any] = False
    __lowercase : int = False

    # setUp: should assign self.model_tester / self.config_tester.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        snake_case__ = MaskaFormerModelTester(self )
        snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )

    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        snake_case__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )

    @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        pass

    @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        pass

    @unittest.skip(reason='''Mask2Former is not a generative model''' )
    def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
        pass

    @unittest.skip(reason='''Mask2Former does not use token embeddings''' )
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        pass

    # Checks the forward signature starts with `pixel_values`.
    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ = model_class(_a )
            snake_case__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case__ = [*signature.parameters.keys()]
            snake_case__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _a )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            snake_case__ = MaskaFormerModel.from_pretrained(_a )
            self.assertIsNotNone(_a )

    # Training-with-labels smoke test: loss should not be None.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        snake_case__ = (self.model_tester.min_size,) * 2
        snake_case__ = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=_a ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=_a ),
            '''class_labels''': torch.zeros(2 , 10 , device=_a ).long(),
        }
        snake_case__ = self.model_tester.get_config()
        snake_case__ = MaskaFormerForUniversalSegmentation(_a ).to(_a )
        snake_case__ = model(**_a )
        self.assertTrue(outputs.loss is not None )

    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )

    # Attention-outputs smoke test.
    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case__ = model_class(_a ).to(_a )
            snake_case__ = model(**_a , output_attentions=_a )
            self.assertTrue(outputs.attentions is not None )

    # Backward-pass smoke test through the segmentation head.
    def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
        if not self.model_tester.is_training:
            return
        snake_case__ = self.all_model_classes[1]
        snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs()
        snake_case__ = model_class(_a )
        model.to(_a )
        model.train()
        snake_case__ = model(_a , mask_labels=_a , class_labels=_a ).loss
        loss.backward()

    # Retained-gradient test: every intermediate should receive a gradient.
    def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        snake_case__ = self.all_model_classes[1]
        snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs()
        snake_case__ = True
        snake_case__ = True
        snake_case__ = model_class(_a ).to(_a )
        model.train()
        snake_case__ = model(_a , mask_labels=_a , class_labels=_a )
        snake_case__ = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        snake_case__ = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        snake_case__ = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        snake_case__ = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_a )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow integration tests below when
# comparing tensors (passed to torch.allclose).
lowerCamelCase__ : Tuple = 1E-4


def prepare_img():
    """Load and return the standard COCO two-cats fixture image.

    Fix: name restored from the `prepare_img()` call sites in the
    integration tests; the loaded image was previously bound to a throwaway
    local while the function returned the undefined name `image`.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class __magic_name__ (unittest.TestCase ):
    """Slow integration tests: run pretrained Mask2Former on a fixture image
    and compare output slices against hard-coded reference values.

    NOTE(review): mangled file — all methods share one name (only the last
    is discoverable), ``_a`` is read but never defined (originally
    torch_device and the TOLERANCE constant), and results are bound to the
    throwaway local ``snake_case__`` while later lines read ``model``,
    ``inputs``, ``outputs`` etc. Code is left untouched here.
    """

    # model_checkpoints property.
    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:Any ):
        return "facebook/mask2former-swin-small-coco-instance"

    # default_image_processor property (referenced below via self.default_image_processor).
    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    # Base-model inference: checks padded input size and three hidden-state slices.
    def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
        snake_case__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
        snake_case__ = self.default_image_processor
        snake_case__ = prepare_img()
        snake_case__ = image_processor(_a , return_tensors='''pt''' ).to(_a )
        snake_case__ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_a , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            snake_case__ = model(**_a )
        snake_case__ = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
        snake_case__ = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
        snake_case__ = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )

    # Segmentation-head inference: checks mask and class logit slices.
    def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
        snake_case__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
        snake_case__ = self.default_image_processor
        snake_case__ = prepare_img()
        snake_case__ = image_processor(_a , return_tensors='''pt''' ).to(_a )
        snake_case__ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(_a , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            snake_case__ = model(**_a )
        # masks_queries_logits
        snake_case__ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        snake_case__ = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        snake_case__ = torch.tensor(_a ).to(_a )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
        # class_queries_logits
        snake_case__ = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        snake_case__ = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(_a )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )

    # Batched inference with segmentation maps: loss must be produced.
    def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
        snake_case__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
        snake_case__ = self.default_image_processor
        snake_case__ = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
        snake_case__ = inputs['''pixel_values'''].to(_a )
        snake_case__ = [el.to(_a ) for el in inputs['''mask_labels''']]
        snake_case__ = [el.to(_a ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            snake_case__ = model(**_a )
        self.assertTrue(outputs.loss is not None )
| 33 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    """Image processor: optional shortest-edge resize, center crop,
    rescale to [0, 1] and per-channel normalization.

    Fixes applied: every method previously declared all parameters with the
    same duplicated name (a SyntaxError); ``self.`` attribute targets were
    collapsed to a throwaway local even though ``preprocess`` reads
    ``self.do_resize`` etc.; method names are recovered from the
    ``self.resize`` / ``self.center_crop`` / ``self.rescale`` /
    ``self.normalize`` calls in ``preprocess``; the undefined base class is
    restored to the otherwise-unused ``BaseImageProcessor`` import.
    """

    # Name of the tensor this processor produces for the model
    # (BaseImageProcessor convention — confirm attribute name upstream).
    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        # Persist every option so `preprocess` can fall back to them.
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shorter edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Delegates to the module-level `resize` from image_transforms.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one image or a batch.

        Every step can be overridden per call; unset options fall back to
        the values stored at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 672 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Simulate a CUDA OOM so find_executable_batch_size retries a smaller batch.

    Name restored from the four call sites below (the def was mangled to a
    throwaway name, leaving every caller with a NameError).
    """
    raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module):
    """Tiny 3 -> 4 -> 5 MLP with a BatchNorm, used to occupy device memory in tests.

    Fixes applied: class name restored from the ``ModelForTest()`` call in
    the CUDA test below; the submodules were bound to a throwaway local
    instead of ``self`` (so ``forward`` hit AttributeError); ``nn.BatchNormad``
    does not exist — the 4-feature layer is ``nn.BatchNorm1d``; the two
    linear layers are ordered by their shapes (3->4 first, 4->5 last).
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        # linear1 (3->4) -> batchnorm -> linear2 (4->5)
        return self.linear2(self.batchnorm(self.linear1(x)))
class snake_case_ ( unittest.TestCase ):
    """Tests for accelerate's find_executable_batch_size / release_memory.

    Fixes applied: inner mock functions declared duplicate parameter names
    (a SyntaxError) while their bodies read ``batch_size`` / ``arg1`` /
    ``arg2``; all test methods shared one mangled name so only the last was
    discoverable — distinct ``test_*`` names are restored; expected
    exception types are restored where the mangled code passed an undefined
    name to ``assertRaises`` (RuntimeError for the reached-zero paths,
    TypeError for the batch-size-passed guard — confirm against upstream).
    """

    def test_base_case(self):
        """Batch size halves from 128 down to the first size that fits (8)."""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_passed_arguments(self):
        """Extra positional args are forwarded untouched to the wrapped function."""
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('''hello''')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, '''hello'''])

    def test_start_zero(self):
        """Starting at zero immediately fails with the reached-zero error."""

        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_approach_zero(self):
        """Always-OOM functions decay the batch size all the way to zero."""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_verbose_guard(self):
        """Passing batch_size explicitly is rejected with a helpful message."""

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, '''hello''', '''world''')
        self.assertIn('''Batch size was passed into `f`''', cm.exception.args[0])
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''', cm.exception.args[0])

    def test_any_other_error(self):
        """Non-OOM exceptions propagate unchanged."""

        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('''Oops, we had an error!''')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        """release_memory drops the model's CUDA allocation back to baseline."""
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    """Encode *string* (UTF-8) with Ascii85 and return the encoded bytes.

    Fix: the digit-mangled ``baseaa.aaaencode`` does not exist — it is the
    stdlib ``base64.a85encode``; the parameter is restored to ``string``,
    which the body already reads.
    """
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 672 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowercase ( PipelineTool ):
    """Tool that produces a binary segmentation mask of an image for a text label.

    Fixes applied: the undefined base class is restored to the
    otherwise-unused ``PipelineTool`` import; ``__init__``/``encode``
    declared duplicate parameter names (a SyntaxError); the thresholding
    assignments in ``decode`` lost their ``array[...]`` targets in the
    mangling and are restored (negative logits -> 0, positive -> 1 —
    confirm against upstream); ``np.uinta`` does not exist (``np.uint8``).
    Class attribute names follow the PipelineTool convention — confirm.
    """

    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''vision'''] )
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Tokenize the label and preprocess the image for CLIPSeg."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='''pt''' )

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits into a binary mask image (0 or 255 per pixel)."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 2_55).astype(np.uint8) )
| 35 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 672 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL.
# NOTE(review): this mapping rebinds the same name as the logger above,
# clobbering it — the logger is unusable after this line.
__lowercase : List[Any] = {
    '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
    '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class _A ( PretrainedConfig ):
    '''Configuration for an EnCodec neural audio codec model.

    Restored from a broken revision: every ``__init__`` parameter shared one
    name (a SyntaxError), values were bound to a throwaway local instead of
    ``self``, all four properties shared one name so ``self.chunk_length`` /
    ``self.frame_rate`` could never resolve, and ``frame_rate`` referenced an
    undefined ``hop_length``. The base class is the imported
    ``PretrainedConfig``.
    '''

    # Read by the PretrainedConfig machinery to identify the model family.
    model_type = '''encodec'''

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],  # bandwidths (kbps) the codec supports
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook dimension defaults to the model hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
        super().__init__(**kwargs )

    @property
    def chunk_length(self):
        '''Number of audio samples per chunk, or None when chunking is disabled.'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def chunk_stride(self):
        '''Hop between consecutive chunks in samples (at least 1), or None.'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def frame_rate(self):
        '''Encoder output frames per second of audio.'''
        # fix: the hop length was previously assigned to a throwaway name,
        # leaving `hop_length` undefined at the return statement.
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def num_quantizers(self):
        '''Number of residual quantizers needed for the largest target bandwidth.'''
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 36 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset ( datasets.BeamBasedBuilder ):
    '''Beam-based builder over flat ``{"content": str}`` examples, for tests.

    Restored from a broken revision: the class and its three builder hooks all
    shared mangled names (so only the last method survived), both hook
    parameters shared one name (a SyntaxError), and ``supervised_keys`` was an
    undefined name. The class name matches the ``DummyBeamDataset`` call sites
    in the tests below.
    '''

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()} )]

    def _build_pcollection(self, pipeline, examples):
        # apache_beam is an optional dependency; import lazily.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset ( datasets.BeamBasedBuilder ):
    '''Beam-based builder over nested ``{"a": {"b": [str]}}`` examples, for tests.

    Restored from a broken revision (mangled colliding names, duplicate hook
    parameters, undefined ``supervised_keys`` argument). The class name matches
    the ``NestedBeamDataset`` call sites in the tests below.
    '''

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()} )
        ]

    def _build_pcollection(self, pipeline, examples):
        # apache_beam is an optional dependency; import lazily.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples ( ):
    '''Return (index, example) pairs for the flat dummy Beam dataset.

    Renamed from a mangled identifier so the ``get_test_dummy_examples()``
    call sites in the builders and tests above/below resolve.
    '''
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples ( ):
    '''Return (index, example) pairs for the nested Beam dataset.

    Renamed from a mangled identifier so the ``get_test_nested_examples()``
    call sites in the builders and tests above/below resolve.
    '''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest ( TestCase ):
    '''End-to-end tests for the Beam-based dataset builders (DirectRunner).

    Restored from a broken revision: the base class and cache-dir/count
    variables were undefined mangled names, all test methods shared one name
    (so unittest discovered none of them), and the sharded test checked the
    ``00000`` shard twice instead of ``00000`` and ``00001``.
    '''

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples )
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json" ) ) )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                # Force two shards so the sharded file-name pattern is exercised.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
            # fix: previously checked shard 00000 twice; the second shard is 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ), sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json" ) ) )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            # Without a beam_runner, preparation must fail loudly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare )

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train.arrow''' ) ) )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples )
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json" ) ) )
            del dset
| 672 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL.
# NOTE(review): this mapping rebinds the same name as the logger above,
# clobbering it — the logger is unusable after this line.
UpperCamelCase : List[Any] = {
    """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config ( PretrainedConfig ):
    """Configuration for a UMT5 model.

    Restored from a broken revision: every ``__init__`` parameter shared one
    name (a SyntaxError), the class inherited from itself (the imported
    ``PretrainedConfig`` is the intended base), both class attributes and all
    three properties shared one name, and the ``feed_forward_proj`` validation
    measured ``len()`` of the kwargs mapping instead of the split spec.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1E-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        # fix: previously measured len() of the kwargs mapping, not the spec.
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for UMT5 seq2seq models.

    Restored from a broken revision: the base class was an undefined name (the
    imported ``OnnxSeqaSeqConfigWithPast`` is the intended base), all three
    properties shared one mangled name, and the decoder-input axis specs were
    bound to throwaway locals instead of being inserted into the mapping.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self):
        """Dynamic-axis specification for the exported model's inputs."""
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With a cache, the encoder mask covers past + current tokens and
            # the decoder consumes a single new token per step.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs" )
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self):
        return 13

    @property
    def atol_for_validation(self):
        return 5E-4
| 37 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# Flag kept for parity with other pipeline test modules; currently unused.
__magic_name__ : Optional[int] = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Fast (non-slow) tests for VersatileDiffusionImageVariationPipeline — none implemented yet.'''
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineSlowTests ( unittest.TestCase ):
    '''GPU integration test for VersatileDiffusionImageVariationPipeline.

    Fixes over the previous revision: the device / progress-bar arguments were
    the undefined name ``lowerCamelCase`` (``torch_device`` is imported above),
    and the method name did not start with ``test_`` so unittest never ran it.
    '''

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=init_image, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        # Loose tolerance: GPU kernels are not bitwise reproducible across drivers.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 0 |
'''simple docstring'''
def is_pangram ( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''Return True if *input_str* uses every letter a-z at least once.

    Fixes: the body referenced names that did not exist (the parameter and the
    letter set were bound to mangled identifiers, and the set was clobbered by
    the whitespace-stripped string). Renamed to match the benchmark's
    ``from __main__ import is_pangram, ...`` setup string.

    >>> is_pangram()
    True
    >>> is_pangram("abc")
    False
    '''
    seen = set()
    # Whitespace can never contribute a letter; strip it up front.
    stripped = input_str.replace(" ", "" )
    for alpha in stripped:
        if "a" <= alpha.lower() <= "z":
            seen.add(alpha.lower() )
    return len(seen ) == 26
def is_pangram_faster ( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''Flag-array pangram check: one boolean per letter of the alphabet.

    Fixes: the per-letter flag was previously assigned to a throwaway local,
    so no array element was ever set and the function always returned False.
    Explicit a-z / A-Z bounds keep accented letters (e.g. 'é') from indexing
    outside the 26-slot array.
    '''
    flag = [False] * 26
    for char in input_str:
        if "a" <= char <= "z":
            flag[ord(char ) - 97] = True  # 97 == ord("a")
        elif "A" <= char <= "Z":
            flag[ord(char ) - 65] = True  # 65 == ord("A")
    return all(flag )
def is_pangram_fastest ( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''Set-comprehension pangram check (fastest of the three variants).

    Fixes: the body referenced ``input_str`` while the parameter had a
    different mangled name. The filter is restricted to ASCII a-z so accented
    letters cannot make a 25-letter string count as a pangram.
    '''
    return len({char for char in input_str.lower() if "a" <= char <= "z"} ) == 26
def benchmark ( ) -> None:
    '''Time the three pangram implementations with ``timeit`` and print the results.

    Renamed from a mangled identifier so the ``benchmark()`` call in the
    ``__main__`` guard below resolves; the setup string already imports the
    canonical ``is_pangram*`` names.
    '''
    from timeit import timeit

    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=setup ) )
    print(timeit("""is_pangram_faster()""" , setup=setup ) )
    print(timeit("""is_pangram_fastest()""" , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    # Run the doctest examples, then print comparative timings.
    import doctest
    doctest.testmod()
    # NOTE(review): `benchmark` is not defined under that name in this module
    # (the timing helper above is bound to a mangled identifier) — this call
    # will raise NameError as the file stands; confirm the intended name.
    benchmark()
| 38 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset ( dataset , expected_features ):
    '''Shared assertions for a Dataset read from the 4-line text fixture.

    Fixes: both parameters previously shared one mangled name (a SyntaxError)
    and the helper's name did not match its ``_check_text_dataset`` call sites
    below.
    '''
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_text_keep_in_memory ( keep_in_memory , text_path , tmp_path ):
    '''Reading with/without keep_in_memory moves the Arrow table in/out of RAM.

    Fixes: all parameters shared one mangled name (a SyntaxError); pytest
    resolves the `text_path`/`tmp_path` fixtures by parameter name.
    '''
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ] , )
def test_dataset_from_text_features ( features , text_path , tmp_path ):
    '''The reader casts the single "text" column to the requested feature dtype.'''
    cache_dir = tmp_path / "cache"
    # Default dtype when no explicit features are requested.
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_text_split ( split , text_path , tmp_path ):
    '''The requested split name is propagated; None defaults to "train".'''
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split ).read()
    _check_text_dataset(dataset, expected_features )
    # fix: `==` binds tighter than the conditional expression, so the previous
    # `assert dataset.split == split if split else "train"` was vacuously true
    # for split=None; compare against the resolved split name instead.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_text_path_type ( path_type , text_path , tmp_path ):
    '''A single path and a list of paths are both accepted by the reader.'''
    if issubclass(path_type, str ):
        path = text_path
    elif issubclass(path_type, list ):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_dataset(dataset, expected_features )
def _check_text_datasetdict ( dataset_dict , expected_features , splits=("train",) ):
    '''Shared assertions for a DatasetDict read from the text fixture.

    Fixes: the parameters previously shared one mangled name (a SyntaxError)
    and the helper's name did not match its call sites below.
    '''
    assert isinstance(dataset_dict, DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_datasetdict_from_text_keep_in_memory ( keep_in_memory , text_path , tmp_path ):
    '''DatasetDict variant of the keep_in_memory round-trip test.'''
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ] , )
def test_datasetdict_from_text_features ( features , text_path , tmp_path ):
    '''DatasetDict variant of the feature-cast test.'''
    cache_dir = tmp_path / "cache"
    # Default dtype for the text column when no features are requested.
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_text_split ( split , text_path , tmp_path ):
    '''Each requested split (or both defaults) maps to its own dataset entry.'''
    if split:
        path = {split: text_path}
    else:
        # No explicit split: load both and check against the "train" default.
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
| 672 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester ( unittest.TestCase ):
    '''Holds shared hyper-parameters for the MobileViTImageProcessor tests.

    Fixes: every ``__init__`` parameter previously shared one mangled name
    (a SyntaxError); the class and its method are renamed to match the
    ``MobileViTImageProcessingTester`` / ``prepare_image_processor_dict``
    call sites in the test class below.
    '''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        '''Return the kwargs used to construct a MobileViTImageProcessor.'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''Tests for MobileViTImageProcessor over PIL, NumPy and PyTorch inputs.

    Restored from a broken revision: the mixin base was an undefined name (the
    imported ``ImageProcessingSavingTestMixin`` is the intended base), every
    method shared one mangled identifier so unittest discovered none of them,
    and ``image_processing_class`` / ``image_processor_tester`` were bound to
    throwaway locals. The repeated unbatched/batched shape checks are factored
    into ``_check_encoding``.
    '''

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self )

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
        self.assertTrue(hasattr(image_processing, '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing, '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing, '''do_flip_channel_order''' ) )

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )

        # Scalar overrides expand to the processor's canonical dict form.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )

    def test_batch_feature(self):
        pass

    def _check_encoding(self, image_inputs, expected_type):
        '''Shared shape assertions for unbatched and batched processor calls.'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        for image in image_inputs:
            self.assertIsInstance(image, expected_type )

        # Unbatched input yields a batch of one.
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

        # Batched input keeps the configured batch size.
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

    def test_call_pil(self):
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        self._check_encoding(image_inputs, Image.Image )

    def test_call_numpy(self):
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        self._check_encoding(image_inputs, np.ndarray )

    def test_call_pytorch(self):
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        self._check_encoding(image_inputs, torch.Tensor )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: symbol lists per submodule, consumed by _LazyModule.
# fix: the dict was previously bound to a throwaway name, the model list
# rebound (clobbered) that same name instead of extending the dict, and
# `_import_structure` was undefined at the _LazyModule call.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration symbols.
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # NOTE(review): the submodule names below (`configuration_swinva`, ...)
    # look mangled relative to the "swinv2" keys above — confirm the actual
    # file names in this package.
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy in sys.modules so attribute access triggers the
    # real imports on demand (previously the proxy was bound to a throwaway
    # name, leaving the module non-lazy).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
# Restored names: both constants were bound to the same placeholder identifier
# while the rest of the script reads `logger` and `EXPECTED_MISSING_KEYS`.
logger = logging.get_logger(__name__)

# Positional embeddings are recomputed by the HF model, so this key is expected
# to be absent after loading the fairseq state dict.
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """Map a fairseq/audiocraft MusicGen state-dict key to its HF equivalent.

    Restored: the function is called as ``rename_keys`` below; the original
    body assigned every replacement to a dead local and returned the input
    unchanged, making the whole conversion a no-op.
    """
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    # norm_cross must be checked before norm2 renames run (both contain "norm").
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename fairseq keys to HF names and split the fused QKV projection.

    Restored: the original had duplicate parameter names (a SyntaxError) and
    discarded the split Q/K/V tensors into dead locals instead of writing them
    back into ``state_dict``.

    Returns:
        The (mutated) decoder state dict, and a separate dict holding the
        ``enc_to_dec_proj`` weights, which belong to the composite model.
    """
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj into separate q/k/v projection weights
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> "MusicgenDecoderConfig":
    """Build the Musicgen decoder config for a named checkpoint size.

    Restored: the size values were assigned to dead locals while the config
    constructor read the proper names (``hidden_size`` etc.), which were
    undefined.

    Raises:
        ValueError: if ``checkpoint`` is not one of ``small``/``medium``/``large``.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,  # MusicGen uses a 4x FFN expansion
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert an audiocraft MusicGen checkpoint to a HF MusicgenForConditionalGeneration.

    Restored: every intermediate (model, state dicts, processor, generation
    config values) was assigned to dead placeholder locals, leaving the real
    names undefined.

    Args:
        checkpoint: one of ``small``/``medium``/``large``.
        pytorch_dump_folder: optional directory to save the converted model/processor.
        repo_id: optional Hub repo to push to.
        device: torch device for loading the fairseq model.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    # Bug fix: `args.device` was parsed but never forwarded, so the --device
    # flag was silently ignored.
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """Return f(x) = x^2 - a, whose positive root is sqrt(a).

    Restored: the original had two parameters with the same name (a
    SyntaxError) and is called as ``fx`` by the Newton iteration below.
    """
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """Return f'(x) = 2x, the derivative of x^2 - a (called as ``fx_derivative`` below)."""
    return 2 * x
def get_initial_point(a: float) -> float:
    """Return a starting guess >= sqrt(a) by repeatedly squaring 2.0.

    Restored: the loop variable ``start`` was assigned to a dead local, so
    both the loop condition and the update read an undefined name.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """Approximate sqrt(a) with Newton-Raphson iteration on f(x) = x^2 - a.

    Restored: duplicate parameter names (a SyntaxError) and dead placeholder
    locals where ``value``/``prev_value`` were meant.

    Args:
        a: non-negative number to take the square root of.
        max_iter: iteration cap if the tolerance is never reached.
        tolerance: stop once successive estimates differ by less than this.

    Raises:
        ValueError: if ``a`` is negative.
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        # Newton step: x_{n+1} = x_n - f(x_n) / f'(x_n)
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowerCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Count the triangle words in ``words.txt`` (Project Euler 42).

    A word's value is the sum of its letters' alphabetical positions
    (``ord(letter) - 64`` for upper-case ASCII); the word is a triangle word
    when that value is a triangular number.

    Restored: the function is called as ``solution()`` below but was defined
    under a placeholder name, and the letter loop summed an undefined name
    instead of ``ord(x)``.
    """
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    # The file is a single line of comma-separated, double-quoted words.
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
    # Entry point: report how many words in words.txt are triangle words.
    answer = solution()
    print(answer)
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored names: both values were bound to the same placeholder while the
# classes below read `logger`, and the archive map follows the transformers
# `*_PRETRAINED_CONFIG_ARCHIVE_MAP` convention.
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of the GIT model.

    Restored: the class was given a placeholder name (and is referenced as
    ``GitVisionConfig`` by ``GitConfig`` below), the base class must be
    ``PretrainedConfig`` (imported above), and every ``self`` assignment had
    been dropped to a dead local.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the vision config, unwrapping it from a composite GIT config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the composite GIT (GenerativeImage2Text) model.

    Restored: placeholder class name (it shadowed the vision config class
    above), ``PretrainedConfig`` base, and the ``self`` attribute assignments
    that had been dropped to dead locals.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Restored names: both values were bound to the same placeholder `A_`;
# the config class below uses `logger`, and the archive map follows the
# transformers naming convention.
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    """Configuration for the Data2VecText (RoBERTa-style) model.

    Restored: placeholder class/base names (the base must be
    ``PretrainedConfig``), the ``model_type`` attribute name, and the
    ``self`` assignments that had been dropped to dead locals.
    """

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for Data2VecText.

    Restored: placeholder class name (it shadowed the model config above),
    the ``OnnxConfig`` base (imported above), and the canonical ``inputs``
    property name expected by the ONNX export machinery.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis between batch
        # and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
def shape_list(tensor):
    """Return the shape of a tensor as a list, preferring static dims.

    NumPy arrays return their static shape directly. For TF tensors, any
    dimension that is statically unknown (``None``) is replaced by the
    corresponding element of the dynamic ``tf.shape``.

    Restored: the fully-unknown-shape check must compare against
    ``tf.TensorShape(None)`` (the original compared against the input itself),
    and the static shape list was bound to a dead local.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    # A completely unknown shape: fall back to the dynamic shape tensor.
    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    """Numerically-stabilized wrapper around ``tf.nn.softmax``.

    Adds a tiny epsilon to the logits to work around an XLA softmax bug on
    CPU; the epsilon is far below float precision so the result is unchanged.
    Restored: the original signature reused one parameter name three times
    (a SyntaxError).
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Layer normalization over a single axis, like ``torch.nn.functional.layer_norm``.

    Restored: duplicate parameter names (a SyntaxError) and dead locals where
    ``mean``/``variance`` and the reshaped scale/offset were meant.

    Raises:
        NotImplementedError: if weight/bias are not 1-D or axis is not a single int.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """Flatten dimensions ``start_dim..end_dim`` of a TF tensor, like ``torch.flatten``.

    Restored: dead locals where the dynamic input shape, the flattened-dim
    product and the output shape were meant.
    """
    # Normalize negative dims against the tensor rank.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask):
    """Turn a 0/1 attention mask into an additive mask (0 keep, dtype-min drop).

    Restored: the converted tensor and the broadcastable extended mask were
    bound to dead locals, leaving ``encoder_extended_attention_mask`` undefined.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    # Insert singleton head (and query) axes so the mask broadcasts over
    # attention scores of shape (batch, heads, query_len, key_len).
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids"):
    """Assert that every token id in ``tensor`` is below the embedding size.

    Restored: duplicate parameter names (a SyntaxError); the message now
    interpolates the actual tensor/limit instead of a single reused name.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save a (possibly large) list attribute to an HDF5 group, chunking if needed.

    HDF5 caps the size of object headers, so attributes larger than the limit
    are split across ``name0``, ``name1``, ... keys.

    Restored: the chunk writes into ``group.attrs`` had been dropped to dead
    locals, so nothing was ever saved.

    Raises:
        RuntimeError: if any single item already exceeds the header limit.
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Load a list attribute saved by ``save_attributes_to_hdf5_group``.

    Reads either the single attribute ``name`` or the chunked ``name0``,
    ``name1``, ... sequence, decoding bytes entries to str.

    Restored: the loaded list and chunk counter had been dropped to dead
    locals, leaving ``data``/``chunk_id`` undefined.
    """
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand every rank-1 TF tensor in a nested structure to rank 2 (trailing axis)."""

    def _expand_single_1d_tensor(t):
        # Only touch actual tf.Tensors of rank 1; everything else passes through.
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# Restored names: the flag and the list were bound to the same placeholder
# while the code below reads `_has_s3fs` and `COMPRESSION_FILESYSTEMS`;
# filesystem class names follow the real compression module.
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip a filesystem scheme (e.g. ``s3://``) from a dataset path.

    Bug fix: the stripped path was assigned to a dead local and the original
    argument was returned unchanged, so the scheme was never removed.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs) -> bool:
    """Return True when ``fs`` is a non-local fsspec filesystem.

    Simplified the ``if/else True/False`` ladder to a single boolean
    expression; ``None`` (no filesystem) counts as local.
    """
    return fs is not None and fs.protocol != "file"
def rename(fs, src, dst):
    """Move ``src`` to ``dst`` on filesystem ``fs``.

    Restored: the locality flag was bound to a dead local (leaving ``is_local``
    undefined), and the remote branch now moves ``src`` -> ``dst`` recursively
    instead of passing one reused argument three times.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Reset fsspec's async machinery, e.g. after a fork.

    Restored: the fallback branch assigned three dead locals; it must clear
    fsspec's module-level io thread/loop holders and replace the lock.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
'''simple docstring'''
__magic_name__ : int = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully-parenthesized infix expression with Dijkstra's two-stack algorithm.

    Restored: the function is called by this name in the main guard below,
    and the operator table / both stacks had been bound to dead locals.

    Rules:
        1. digit    -> push onto the operand stack
        2. operator -> push onto the operator stack
        4. ')'      -> pop one operator and two operands, apply, push result
        5. finally, the operand stack's top is the value of the expression
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            # num2 was pushed first, so it is the left operand.
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    # Restored: the expression was bound to a placeholder while the f-string
    # below reads `equation`.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an int value and optional children.

    Restored: placeholder class name (it collided with the traversal class
    below) and the instance attributes, which had been dropped to dead locals.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    """Sum all node values of a binary tree via recursive depth-first search.

    Restored: placeholder class name and the ``self.tree`` attribute, which
    had been dropped to a dead local.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        # Empty subtree contributes 0.
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        # Yields the single total so the object works with list()/next().
        yield self.depth_first_search(self.tree)
import doctest
doctest.testmod() | 44 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return a torch activation module for a config string.

    Restored the canonical name/annotations; the body's use of ``act_fn``
    already matched this signature while the defined parameter name did not.

    Args:
        act_fn: one of ``"swish"``, ``"silu"``, ``"mish"``, ``"gelu"``.

    Raises:
        ValueError: for any other string.
    """
    # "swish" is an alias for SiLU.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    """Bundles a Wav2Vec2 feature extractor and a CTC tokenizer into one processor.

    Restored: placeholder class/attribute names (the base is ``ProcessorMixin``,
    imported above) and the many ``self.current_processor`` / result-dict
    assignments that had been dropped to dead locals, which broke the
    deprecated target-context workflow and label attachment.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # `current_processor` switches to the tokenizer inside
        # `as_target_processor()` (deprecated) and back afterwards.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the processor, falling back to explicit component classes for old configs."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """,
                FutureWarning,
            )

            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Featurize `audio` and/or tokenize `text`; text ids are attached as `labels` when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
            audio = kwargs.pop("""raw_speech""")
        else:
            audio = kwargs.pop("""audio""", None)
        sampling_rate = kwargs.pop("""sampling_rate""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            # First positional argument is the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` and/or `labels`; label ids are attached when both are given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("""input_features""", None)
        labels = kwargs.pop("""labels""", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route ``__call__``/``pad`` to the tokenizer for label processing."""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call."""
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__magic_name__ : Tuple = 0
__magic_name__ : Dict = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__magic_name__ : str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__magic_name__ : Dict = tuple[int, int]
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
_snake_case = pos_x
_snake_case = pos_y
_snake_case = (pos_y, pos_x)
_snake_case = goal_x
_snake_case = goal_y
_snake_case = g_cost
_snake_case = parent
_snake_case = self.calculate_heuristic()
_snake_case = self.g_cost + self.h_cost
def UpperCamelCase( self ):
_snake_case = self.pos_x - self.goal_x
_snake_case = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCamelCase ) + abs(lowerCamelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowerCamelCase ):
return self.f_cost < other.f_cost
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , lowerCamelCase , lowerCamelCase ):
_snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase )
_snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowerCamelCase )
_snake_case = [self.start]
_snake_case = []
_snake_case = False
def UpperCamelCase( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_snake_case = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCamelCase )
self.closed_nodes.append(lowerCamelCase )
_snake_case = self.get_successors(lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCamelCase )
else:
# retrieve the best current path
_snake_case = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCamelCase )
else:
self.open_nodes.append(lowerCamelCase )
return [self.start.pos]
def UpperCamelCase( self , lowerCamelCase ):
_snake_case = []
for action in delta:
_snake_case = parent.pos_x + action[1]
_snake_case = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCamelCase , lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase , ) )
return successors
def UpperCamelCase( self , lowerCamelCase ):
_snake_case = node
_snake_case = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_snake_case = current_node.parent
path.reverse()
return path
class BidirectionalAStar:
    """Bidirectional A*: two A* searches that grow toward each other."""

    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Expand both frontiers until they meet; return the combined path."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each half-search aims at the other search's current node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # Keep whichever copy of this position has the cheaper path.
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        # Frontiers never met: fall back to the forward start position.
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the two half-paths (they share the meeting cell) into one path."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting cell
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # All coordinates are given in format [y, x].
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # Fix: the search itself was never invoked, so the timing below measured nothing.
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 672 | 0 |
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single Hacker News story's JSON by its numeric id."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Fetch the current top ``max_stories`` Hacker News stories."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
    # Print the current Hacker News front page as a markdown link list.
    print(hackernews_top_stories_as_markdown())
# Lazy-import module definition for the Nezha model, following the standard
# transformers `_import_structure` pattern: heavy submodules are only imported
# on first attribute access via _LazyModule.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: only the configuration stays importable.
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy (the result must be bound
    # into sys.modules, otherwise the lazy module is discarded).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 0 |
def bfs(graph, s, t, parent):
    """BFS over the residual capacity matrix, filling ``parent`` with the path tree.

    Args:
        graph: square residual-capacity matrix.
        s: source vertex index.
        t: sink vertex index.
        parent: list mutated in place; parent[v] = predecessor of v on the BFS tree.

    Returns:
        True if the sink ``t`` is reachable from ``s`` through positive-capacity edges.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Follow only edges with remaining capacity to unvisited nodes.
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from ``source`` to ``sink`` (Edmonds-Karp variant).

    ``graph`` is a capacity matrix and is mutated in place into the residual graph.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path (forward and reverse edges).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
# Example capacity matrix (classic CLRS max-flow example); max flow 0 -> 5 is 23.
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 47 |
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    """Encode ``sequence`` with the Atbash cipher using ord()/chr() arithmetic.

    Non-alphabetic characters are passed through unchanged.
    >>> atbash_slow("ABCDEFGH")
    'ZYXWVUTS'
    """
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            # Uppercase: A..Z (65..90) maps to Z..A via 155 - code.
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            # Lowercase: a..z (97..122) maps to z..a via 219 - code.
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    """Encode ``sequence`` with the Atbash cipher via a reversed-alphabet lookup.

    >>> atbash("ABCDEFGH")
    'ZYXWVUTS'
    """
    letters = string.ascii_letters
    # Reversed alphabets aligned with ascii_letters (lowercase first, then uppercase).
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Compare runtimes of atbash_slow() and atbash() with timeit.

    NOTE: uses timeit's default number of iterations, so this is slow by design.
    """
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
    # Demo a few sample encodings, then time both implementations.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F'{example} encrypted in atbash: {atbash(example)}')
    benchmark()
| 672 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    """Array-backed min-heap that also tracks each vertex's index in the heap
    (``node_position``) so Prim's algorithm can decrease keys in O(log n).

    ``heap`` holds the keys and ``positions`` holds the vertex ids in lockstep.
    """

    def __init__(self):
        # node_position[vertex] -> current index of that vertex inside the heap.
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently holding ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now lives at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap both the keys and the vertex ids, then fix node_position.
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift ``val`` up from ``index`` after its key was decreased."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val belongs at index 0.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over ``heap`` (keys) / ``positions`` (vertices)."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex id with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's minimum-spanning-tree algorithm using the index-tracking Heap above.

    Args:
        adjacency_list: vertex -> list of [neighbor, edge_weight] pairs
            (vertices numbered 0..n-1, vertex 0 is the root).

    Returns:
        List of (parent_vertex, child_vertex) edges of a minimum spanning tree.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # neighboring tree vertex of selected vertex
    # Minimum distance of explored vertex with neighboring vertex of partial
    # tree formed in graph.
    distance_tv = []  # heap keys: distance of each vertex from the partial tree
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    # Decrease-key: record the cheaper connection and sift it up.
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        # Each edge line is "u v weight"; the graph is undirected.
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 48 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit / approximate GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
    # Run the docstring examples as tests.
    import doctest
    doctest.testmod()
| 672 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of released checkpoints to their hosted config files.
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the original OpenAI GPT model.

    Defaults correspond to the released ``openai-gpt`` checkpoint.  All
    hyper-parameters are stored as instance attributes; extra keyword
    arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "openai-gpt"
    # Map common transformers attribute names onto GPT's historical names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Sequence-summary settings (used by the double-heads model).
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 49 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    """Single-node SageMaker training smoke test, run once per framework.

    Gated behind TEST_SAGEMAKER=True; expects the ``sm_env`` fixture to attach
    a configured ``self.env``.
    """

    def setUp(self):
        if self.framework == "pytorch":
            # The pytorch job trains with the example GLUE script; copy it into
            # the job's source dir first.
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build a HuggingFace estimator for this parameterized framework/script."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's metric time series to a CSV next to the test."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # Create and launch the training job.
        estimator = self.create_estimator()
        estimator.fit()
        # Result dataframe with the reported metrics.
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # Extract KPIs.
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # Train time from the SageMaker job; includes starting, preprocessing, stopping.
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # Assert KPIs against the expected per-framework thresholds.
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # Dump test results into a json file to share in the PR.
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 672 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
lowerCamelCase__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCamelCase__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
lowerCamelCase__ = np.concatenate(__lowerCAmelCase , axis=0 )
lowerCamelCase__ = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0
lowerCamelCase__ = image.transpose(0 , 3 , 1 , 2 )
lowerCamelCase__ = 2.0 * image - 1.0
lowerCamelCase__ = torch.from_numpy(__lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
lowerCamelCase__ = torch.cat(__lowerCAmelCase , dim=0 )
return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two vectors.

    Accepts numpy arrays or torch tensors (torch inputs are round-tripped
    through numpy and the result is moved back to their original device).

    Args:
        t: interpolation factor in [0, 1].
        v0, v1: endpoint vectors (both numpy or both torch).
        DOT_THRESHOLD: above this |cos angle| the vectors are treated as
            colinear and plain lerp is used to avoid numerical blow-up.
    """
    # Bug fix: the flag was previously only assigned inside the torch branch,
    # so numpy inputs raised UnboundLocalError at the end of the function.
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Nearly colinear: fall back to linear interpolation.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    """Squared geodesic (great-circle) distance between L2-normalized vectors."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    # chord length / 2 -> arcsin gives half the angle; square and double it.
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    """Enable or disable gradient tracking on every parameter of ``model``."""
    for param in model.parameters():
        param.requires_grad = value
class UpperCamelCase__ (a ):
    """CLIP-guided image-mixing Stable Diffusion pipeline.

    NOTE(review): identifiers in this block are machine-mangled.  The base
    class ``a`` is undefined in this file — presumably ``DiffusionPipeline``
    (imported above and otherwise unused); confirm.  Every local is assigned
    to the same mangled name ``lowerCamelCase__`` and most helper methods
    share the name ``UpperCamelCase_``, so later definitions shadow earlier
    ones, while internal calls (``self.get_timesteps``,
    ``self.prepare_latents``, ``self.cond_fn``, ...) reference the
    pre-mangling method names.  This block does not run as written; left
    byte-identical pending a verified restoration.
    """
    # Registers all submodules (vae, text encoder, CLIP, unet, scheduler and
    # the optional CoCa captioner) and freezes text encoder + CLIP.
    def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,):
        super().__init__()
        self.register_modules(
            vae=_lowerCAmelCase ,text_encoder=_lowerCAmelCase ,clip_model=_lowerCAmelCase ,tokenizer=_lowerCAmelCase ,unet=_lowerCAmelCase ,scheduler=_lowerCAmelCase ,feature_extractor=_lowerCAmelCase ,coca_model=_lowerCAmelCase ,coca_tokenizer=_lowerCAmelCase ,coca_transform=_lowerCAmelCase ,)
        # Feature-extractor target size: plain int or a dict with "shortest_edge".
        lowerCamelCase__ = (
            feature_extractor.size
            if isinstance(feature_extractor.size ,_lowerCAmelCase )
            else feature_extractor.size["""shortest_edge"""]
        )
        lowerCamelCase__ = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder ,_lowerCAmelCase )
        set_requires_grad(self.clip_model ,_lowerCAmelCase )
    # Presumably enable_attention_slicing (called by name at the next method).
    def UpperCamelCase_ ( self ,_lowerCAmelCase = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase__ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_lowerCAmelCase )
    # Presumably disable_attention_slicing (delegates with a sentinel arg).
    def UpperCamelCase_ ( self ):
        self.enable_attention_slicing(_lowerCAmelCase )
    # The next four methods toggle requires_grad on vae / unet
    # (freeze/unfreeze pairs — the bool argument was lost in mangling).
    def UpperCamelCase_ ( self ):
        set_requires_grad(self.vae ,_lowerCAmelCase )
    def UpperCamelCase_ ( self ):
        set_requires_grad(self.vae ,_lowerCAmelCase )
    def UpperCamelCase_ ( self ):
        set_requires_grad(self.unet ,_lowerCAmelCase )
    def UpperCamelCase_ ( self ):
        set_requires_grad(self.unet ,_lowerCAmelCase )
    # Presumably get_timesteps: trims the schedule according to img2img strength.
    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
        # get the original timestep using init_timestep
        lowerCamelCase__ = min(int(num_inference_steps * strength ) ,_lowerCAmelCase )
        lowerCamelCase__ = max(num_inference_steps - init_timestep ,0 )
        lowerCamelCase__ = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    # Presumably prepare_latents: VAE-encode the image and add scheduler noise.
    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ):
        if not isinstance(_lowerCAmelCase ,torch.Tensor ):
            raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}''' )
        lowerCamelCase__ = image.to(device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
        if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
            # Per-sample generators: encode each image slice with its own generator.
            lowerCamelCase__ = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
            ]
            lowerCamelCase__ = torch.cat(_lowerCAmelCase ,dim=0 )
        else:
            lowerCamelCase__ = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        lowerCamelCase__ = 0.1_8215 * init_latents
        lowerCamelCase__ = init_latents.repeat_interleave(_lowerCAmelCase ,dim=0 )
        lowerCamelCase__ = randn_tensor(init_latents.shape ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
        # get latents
        lowerCamelCase__ = self.scheduler.add_noise(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = init_latents
        return latents
    # Presumably get_image_description: caption an image with the CoCa model.
    def UpperCamelCase_ ( self ,_lowerCAmelCase ):
        lowerCamelCase__ = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            lowerCamelCase__ = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
        lowerCamelCase__ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
    # Presumably get_clip_image_embeddings: normalized CLIP image features,
    # repeated per generation.
    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
        lowerCamelCase__ = self.feature_extractor.preprocess(_lowerCAmelCase )
        lowerCamelCase__ = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
        lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
        lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
        lowerCamelCase__ = image_embeddings_clip.repeat_interleave(_lowerCAmelCase ,dim=0 )
        return image_embeddings_clip
    # Presumably cond_fn: CLIP-guidance gradient step on the latents
    # (gradient of spherical distance between CLIP embeddings).
    @torch.enable_grad()
    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
        lowerCamelCase__ = latents.detach().requires_grad_()
        lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
        # predict the noise residual
        lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
        if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            lowerCamelCase__ = self.scheduler.alphas_cumprod[timestep]
            lowerCamelCase__ = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            lowerCamelCase__ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            lowerCamelCase__ = torch.sqrt(_lowerCAmelCase )
            lowerCamelCase__ = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler ,_lowerCAmelCase ):
            lowerCamelCase__ = self.scheduler.sigmas[index]
            lowerCamelCase__ = latents - sigma * noise_pred
        else:
            raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        lowerCamelCase__ = 1 / 0.1_8215 * sample
        lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
        lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
        lowerCamelCase__ = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
        lowerCamelCase__ = self.normalize(_lowerCAmelCase ).to(latents.dtype )
        lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
        lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
        lowerCamelCase__ = spherical_dist_loss(_lowerCAmelCase ,_lowerCAmelCase ).mean() * clip_guidance_scale
        lowerCamelCase__ = -torch.autograd.grad(_lowerCAmelCase ,_lowerCAmelCase )[0]
        if isinstance(self.scheduler ,_lowerCAmelCase ):
            lowerCamelCase__ = latents.detach() + grads * (sigma**2)
            lowerCamelCase__ = noise_pred_original
        else:
            lowerCamelCase__ = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
        return noise_pred, latents
    # Main entry point: style/content mixing with optional CoCa captioning,
    # classifier-free guidance and CLIP guidance.
    @torch.no_grad()
    def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 5_12 ,_lowerCAmelCase = 0.6 ,_lowerCAmelCase = 50 ,_lowerCAmelCase = 7.5 ,_lowerCAmelCase = 1 ,_lowerCAmelCase = 0.0 ,_lowerCAmelCase = 1_00 ,_lowerCAmelCase = None ,_lowerCAmelCase = "pil" ,_lowerCAmelCase = True ,_lowerCAmelCase = 0.8 ,_lowerCAmelCase = 0.1 ,_lowerCAmelCase = 0.1 ,):
        if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
            raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if isinstance(_lowerCAmelCase ,torch.Generator ) and batch_size > 1:
            lowerCamelCase__ = [generator] + [None] * (batch_size - 1)
        # Track which CoCa components are missing (needed only when prompts are None).
        lowerCamelCase__ = [
            ("""model""", self.coca_model is None),
            ("""tokenizer""", self.coca_tokenizer is None),
            ("""transform""", self.coca_transform is None),
        ]
        lowerCamelCase__ = [x[0] for x in coca_is_none if x[1]]
        lowerCamelCase__ = """, """.join(_lowerCAmelCase )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(_lowerCAmelCase ):
                raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
        if style_prompt is None:
            if len(_lowerCAmelCase ):
                raise ValueError(
                    F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
        # get prompt text embeddings for content and style
        lowerCamelCase__ = self.tokenizer(
            _lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
        lowerCamelCase__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        lowerCamelCase__ = self.tokenizer(
            _lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
        lowerCamelCase__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        # Interpolate between the content and style text embeddings.
        lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        # duplicate text embeddings for each generation per prompt
        lowerCamelCase__ = text_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
        # set timesteps
        lowerCamelCase__ = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        lowerCamelCase__ = {}
        if accepts_offset:
            lowerCamelCase__ = 1
        self.scheduler.set_timesteps(_lowerCAmelCase ,**_lowerCAmelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        lowerCamelCase__ , lowerCamelCase__ = self.get_timesteps(_lowerCAmelCase ,_lowerCAmelCase ,self.device )
        lowerCamelCase__ = timesteps[:1].repeat(_lowerCAmelCase )
        # Preprocess image
        lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = self.prepare_latents(
            _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
        lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        lowerCamelCase__ = self.prepare_latents(
            _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
        # Interpolate between the content and style latents.
        lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        if clip_guidance_scale > 0:
            lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
            lowerCamelCase__ = slerp(
                _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowerCamelCase__ = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            lowerCamelCase__ = content_text_input.input_ids.shape[-1]
            lowerCamelCase__ = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=_lowerCAmelCase ,return_tensors="""pt""" )
            lowerCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            lowerCamelCase__ = uncond_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowerCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowerCamelCase__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        lowerCamelCase__ = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(
                    self.device )
            else:
                lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            lowerCamelCase__ = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        lowerCamelCase__ = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowerCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowerCamelCase__ = {}
        if accepts_eta:
            lowerCamelCase__ = eta
        # check if the scheduler accepts generator
        lowerCamelCase__ = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            lowerCamelCase__ = generator
        with self.progress_bar(total=_lowerCAmelCase ):
            for i, t in enumerate(_lowerCAmelCase ):
                # expand the latents if we are doing classifier free guidance
                lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
                # predict the noise residual
                lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
                    lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    lowerCamelCase__ = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    lowerCamelCase__ , lowerCamelCase__ = self.cond_fn(
                        _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,)
                # compute the previous noisy sample x_t -> x_t-1
                lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        lowerCamelCase__ = 1 / 0.1_8215 * latents
        lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
        lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
        lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        if output_type == "pil":
            lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=_lowerCAmelCase ,nsfw_content_detected=_lowerCAmelCase )
| 50 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __SCREAMING_SNAKE_CASE(BertTokenizationTest):
    """Tokenization tests for DistilBERT, reusing the shared BERT tokenizer suite."""

    # Attributes read by the shared BertTokenizationTest harness.
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """Check special-token placement for single and paired sequences."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        # Encode without special tokens so their placement can be verified explicitly.
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # Single sequence: [CLS] text [SEP]; pair: [CLS] text [SEP] text_2 [SEP].
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 672 | 0 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Build (encoder_config, decoder_config) for the HF VisionEncoderDecoder
    from an original DonutModel's configuration.

    Args:
        model: the original ``DonutModel`` whose config and decoder tokenizer
            are used to size the new configs.

    Returns:
        Tuple of (DonutSwinConfig, MBartConfig).
    """
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        # Vocab size comes from the original decoder tokenizer.
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Map one original Donut checkpoint key to its HF-model equivalent.

    Substitutions are applied in sequence on the same string, so earlier
    replacements feed later checks (e.g. ``encoder.model`` -> ``encoder``
    before the ``startswith("encoder")`` branch).
    """
    if "encoder.model" in name:
        name = name.replace('''encoder.model''', '''encoder''')
    if "decoder.model" in name:
        name = name.replace('''decoder.model''', '''decoder''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if name.startswith('''encoder'''):
        if "layers" in name:
            # HF nests the swin stages under an extra "encoder" attribute.
            name = '''encoder.''' + name
        if "attn.proj" in name:
            name = name.replace('''attn.proj''', '''attention.output.dense''')
        if "attn" in name and "mask" not in name:
            name = name.replace('''attn''', '''attention.self''')
        if "norm1" in name:
            name = name.replace('''norm1''', '''layernorm_before''')
        if "norm2" in name:
            name = name.replace('''norm2''', '''layernorm_after''')
        if "mlp.fc1" in name:
            name = name.replace('''mlp.fc1''', '''intermediate.dense''')
        if "mlp.fc2" in name:
            name = name.replace('''mlp.fc2''', '''output.dense''')

        if name == "encoder.norm.weight":
            name = '''encoder.layernorm.weight'''
        if name == "encoder.norm.bias":
            name = '''encoder.layernorm.bias'''

    return name
def convert_state_dict(orig_state_dict, model):
    """Convert an original Donut state dict in place to HF naming.

    Fused ``qkv`` tensors are split into separate query/key/value entries;
    attention-mask buffers and the encoder's final LayerNorm are dropped
    (unused by the HF implementation); everything else is renamed via
    :func:`rename_key`.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Key layout: encoder.model.layers.<L>.blocks.<B>.attn.qkv.{weight,bias}
            key_split = key.split('''.''')
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'''encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original Donut checkpoint to the HF VisionEncoderDecoder format.

    Loads the original model, transfers its weights, verifies the two models
    agree on a sample scanned document, then optionally saves and/or pushes
    the converted model and processor.

    Args:
        model_name: hub id of the original Donut checkpoint.
        pytorch_dump_folder_path: where to save the converted model (skipped if None).
        push_to_hub: whether to push model and processor to the hub.
    """
    # Load the original model.
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset('''hf-internal-testing/example-documents''')
    image = dataset['''test'''][0]['''image'''].convert('''RGB''')

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors='''pt''').pixel_values

    # Each fine-tuned checkpoint expects its own task prompt.
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question = '''When is the coffee break?'''
        task_prompt = task_prompt.replace('''{user_input}''', question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        # NOTE(review): looks like a missing "<", but this matches the upstream prompt — do not "fix".
        task_prompt = '''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = '''hello world'''
    else:
        raise ValueError('''Model name not supported''')
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='''pt''')[
        '''input_ids'''
    ]

    # Verify patch embeddings agree between the two implementations.
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''')[-1], commit_message='''Update model''')
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''')[-1], commit_message='''Update model''')
if __name__ == "__main__":
    # CLI entry point: convert a Donut checkpoint and optionally save/push it.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='naver-clova-ix/donut-base-finetuned-docvqa',
        required=False,
        type=str,
        help='Name of the original model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        required=False,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub.',
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 51 |
"""Lazy import structure for the SwiftFormer model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule name -> public names it exports. Heavy submodules are only
# registered below when their optional dependency (torch) is installed.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''PerceiverFeatureExtractor''']
A = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 52 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the list of (original_key, hf_key) rename pairs for a BEiT/DiT checkpoint.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        has_lm_head: checkpoint has a masked-image-modeling head (mask token +
            final layernorm) rather than a classification head.
        is_semantic: original keys are prefixed with ``backbone.``.

    Returns:
        List of (src, dest) key-name tuples.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias'''))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f'''{prefix}cls_token''', "beit.embeddings.cls_token"),
            (f'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
            (f'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
            (f'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
        ])

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv weight (and separate q/v biases) into the
    HF per-projection entries, in place.

    Also moves the layer-scale parameters ``gamma_1``/``gamma_2`` to
    ``lambda_1``/``lambda_2`` (renamed because ``gamma`` is remapped by
    ``from_pretrained``).
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''')

        # Rows [0, hidden) are query, [hidden, 2*hidden) key, last hidden rows value.
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''')
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''')
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download and return the standard COCO cats test image (network I/O)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a DiT checkpoint from the original URL into an HF BEiT model.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: whether to also push model and image processor to the hub.
    """
    # rvlcdip checkpoints are classifiers; everything else keeps the LM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
if __name__ == "__main__":
    # CLI entry point: convert a DiT checkpoint from a URL.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 672 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_snake_case : Optional[Any] = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Names of the supported learning-rate schedules.

    Values are the string names accepted by ``get_scheduler``.
    """

    LINEAR = """linear"""
    COSINE = """cosine"""
    COSINE_WITH_RESTARTS = """cosine_with_restarts"""
    POLYNOMIAL = """polynomial"""
    CONSTANT = """constant"""
    CONSTANT_WITH_WARMUP = """constant_with_warmup"""
    PIECEWISE_CONSTANT = """piecewise_constant"""
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: the wrapped optimizer.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A ``LambdaLR`` whose multiplier is always 1.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup.

    The multiplier rises linearly from 0 to 1 over ``num_warmup_steps``
    steps, then stays at 1.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant learning-rate multiplier defined by a rule string.

    ``step_rules`` looks like ``"1:0.5,10:0.1,0.01"``: multiplier 0.5 while
    step < 1, 0.1 while step < 10, and 0.01 afterwards (last entry is the
    final multiplier, with no step threshold).
    """
    rules_dict = {}
    rule_list = step_rules.split(',')
    for rule_str in rule_list[:-1]:
        value_str, lr_multiple_str = rule_str.split(':')
        steps = int(value_str)
        lr_multiple = float(lr_multiple_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            # First threshold strictly greater than the current step wins.
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay; clamped at 0 once training steps are exhausted.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay.

    With the default ``num_cycles=0.5`` the multiplier follows half a cosine
    wave from 1 down to 0 over the post-warmup steps.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with ``num_cycles`` hard restarts.

    Each restart jumps the multiplier back to 1 (via the modulo on progress);
    once ``progress >= 1`` the multiplier is pinned at 0.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the optimizer's base
    LR down to ``lr_end``.

    Raises:
        ValueError: if ``lr_end`` is not strictly below the optimizer's
            initial learning rate.
    """
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table used by get_scheduler to map a SchedulerType to its factory.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def a_ ( lowerCAmelCase_ : Union[str, SchedulerType], lowerCAmelCase_ : Optimizer, lowerCAmelCase_ : Optional[str] = None, lowerCAmelCase_ : Optional[int] = None, lowerCAmelCase_ : Optional[int] = None, lowerCAmelCase_ : int = 1, lowerCAmelCase_ : float = 1.0, lowerCAmelCase_ : int = -1, ):
__lowerCAmelCase = SchedulerType(lowerCAmelCase_ )
__lowerCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase_, last_epoch=lowerCAmelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase_, step_rules=lowerCAmelCase_, last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase_, num_warmup_steps=lowerCAmelCase_, last_epoch=lowerCAmelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase_, num_warmup_steps=lowerCAmelCase_, num_training_steps=lowerCAmelCase_, num_cycles=lowerCAmelCase_, last_epoch=lowerCAmelCase_, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase_, num_warmup_steps=lowerCAmelCase_, num_training_steps=lowerCAmelCase_, power=lowerCAmelCase_, last_epoch=lowerCAmelCase_, )
return schedule_func(
lowerCAmelCase_, num_warmup_steps=lowerCAmelCase_, num_training_steps=lowerCAmelCase_, last_epoch=lowerCAmelCase_ )
| 53 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division by odd numbers up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime after ``factor * value``.

    If ``factor * value`` is itself prime, searches onward from the next
    integer. Pass ``desc=True`` to search downward instead of upward.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        # The starting point was already prime; keep searching past it.
        return next_prime(value + 1, **kwargs)
    return value
| 672 | 0 |
def calc_profit(profit, weight, max_weight):
    """Greedy fractional-knapsack: maximize profit within ``max_weight``.

    Items are taken in decreasing profit/weight order; the last item may be
    taken fractionally.

    Args:
        profit: per-item profits (non-negative).
        weight: per-item weights (non-negative), same length as ``profit``.
        max_weight: knapsack capacity (positive).

    Returns:
        The maximum achievable profit (possibly fractional).

    Raises:
        ValueError: on mismatched lengths, non-positive capacity, or negative values.
    """
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # Mark as consumed so .index() never picks the same item twice.
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
    # Interactive entry point: read profits, weights and capacity from stdin.
    print(
        """Input profits, weights, and then max_weight (all positive ints) separated by """
        """spaces."""
    )

    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))

    # Function Call
    calc_profit(profit, weight, max_weight)
| 54 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ : Dict = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[str] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[Any] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
__magic_name__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Visual-Attention-Network/van-base': (
        'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
    ),
}
class UpperCAmelCase(PretrainedConfig):
    """Configuration for a VAN (Visual Attention Network) model.

    Holds the architecture hyper-parameters (input resolution and the
    per-stage patch/stride/width/depth settings) plus regularisation rates.
    Defaults correspond to the ``van-base`` architecture.
    """

    # Model identifier required by the PretrainedConfig/auto-class machinery.
    model_type = "van"

    def __init__(
        self,
        image_size=224,  # input resolution (height == width)
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],  # list defaults kept for config compatibility; read-only here
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 55 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger (transformers logging wrapper around stdlib logging).
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """Image processor that resizes (shortest edge), center-crops, rescales
    and normalizes images into `pixel_values` for a vision model.

    Defaults: shortest-edge-256 resize, 224x224 center crop, 1/255 rescale,
    ImageNet-standard mean/std normalization.
    """

    # Name of the model input this processor produces (BaseImageProcessor contract).
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # `size` is a shortest-edge spec, so it must not be forced square.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize `image` so its shortest edge matches `size["shortest_edge"]`,
        keeping the aspect ratio. Raises ValueError on a malformed size dict."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize `image` channel-wise using `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline over one image or a batch.

        Every flag defaults to the value set in ``__init__`` when left as None.
        Returns a BatchFeature holding ``pixel_values``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Validate that every enabled step has the parameters it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 672 | 0 |
'''Lazy import structure for the TAPAS model (PyTorch and TensorFlow).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Submodule name -> public symbols; consumed by _LazyModule at the bottom.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

# Register torch modeling only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

# Register TensorFlow modeling only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure`.
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''Ascii85 (Base85) string encode/decode helpers built on the standard library.'''
import base64


def ascii85_encode(plain_text: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 bytes.

    >>> ascii85_decode(ascii85_encode("Hello"))
    'Hello'
    """
    return base64.a85encode(plain_text.encode("utf-8"))


def ascii85_decode(a85_encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string.

    >>> ascii85_decode(b"9jqo^")
    'Man '
    """
    return base64.a85decode(a85_encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 672 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Submodule name -> public symbols; consumed by _LazyModule at the bottom.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# Register torch modeling only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# Register TensorFlow modeling only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors `_import_structure`.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 672 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds small ResNet configs/inputs and runs shape checks for the tests.

    Renamed from an obfuscated placeholder: the test class below instantiates
    it as ``TFResNetModelTester(self)``.
    """

    def __init__(self, parent, batch_size=3, image_size=3_2, num_channels=3, embeddings_size=1_0, hidden_sizes=[1_0, 2_0, 3_0, 4_0], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by test_hidden_states_output.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ResNetConfig from the tester's hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head's logits shape."""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF ResNet (bases restored from the imports above)."""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    # ResNet is a convnet: no pruning, no embedding resize, no attention heads.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: ResNet's config has no common text properties.
        return

    @unittest.skip(reason="""ResNet does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""ResNet does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # +1 for the initial embedding output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Renamed from an obfuscated placeholder: the integration test below calls
    ``prepare_img()``.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """End-to-end inference check against a pretrained TF ResNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        # None when vision extras are missing; the test is @slow-gated anyway.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        # Reference slice recorded from the released checkpoint.
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 58 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam-based builder with a flat string feature, for the tests below.

    Renamed to match the call sites in the test case (``DummyBeamDataset``);
    hook names follow the ``BeamBasedBuilder`` contract.
    """

    def _info(self):
        # supervised_keys=None: this dummy dataset defines no (input, target) pair.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder with a nested sequence feature, for the tests below.

    Renamed to match the call site in the test case (``NestedBeamDataset``);
    hook names follow the ``BeamBasedBuilder`` contract.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return (key, example) pairs with a flat `content` string field.

    Renamed to match the call sites in the builders and tests above/below.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return (key, example) pairs with a nested `a.b` list-of-strings field.

    Renamed to match the call sites in the builders and tests above/below.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """Tests the Beam-based builders above end-to-end with the DirectRunner."""

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train.arrow''')))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        # Keep a handle on the real transform before patching it below.
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two shards so the sharded file layout is exercised.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            # Both shards must exist (00000 and 00001 of 00002).
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train-00000-of-00002.arrow''')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train-00001-of-00002.arrow''')))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner, preparation must fail with MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", F'''{builder.name}-train.arrow''')))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 672 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[int] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__: Tuple =ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: int =tmp_path / "cache"
lowerCamelCase__: Tuple ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Union[str, Any] =features.copy() if features else default_expected_features
lowerCamelCase__: Optional[int] =(
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__: int =ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: Optional[Any] =ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_parquet_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCAmelCase_ ( __a , __a , __a ) -> int:
"""simple docstring"""
if issubclass(__a , __a ):
lowerCamelCase__: List[Any] =parquet_path
elif issubclass(__a , __a ):
lowerCamelCase__: str =[parquet_path]
lowerCamelCase__: Tuple =tmp_path / "cache"
lowerCamelCase__: Optional[Any] ={"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCamelCase__: int =ParquetDatasetReader(__a , cache_dir=__a ).read()
_check_parquet_dataset(__a , __a )
def lowerCAmelCase_ ( __a , __a , __a=("train",) ) -> Dict:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
lowerCamelCase__: Tuple =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """DatasetDict read honours ``keep_in_memory`` (checked via arrow memory).

    Fix: duplicate parameter names (SyntaxError); fixture names restored.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """DatasetDict read with an explicit ``features`` schema.

    Fix: duplicate parameter names (SyntaxError); fixture names restored.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """DatasetDict read with a mapping of split name -> path.

    Fix: duplicate parameter names (SyntaxError) and locals restored
    (``path`` was assigned to a clobbered name but read by name below).
    """
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Write a Dataset to parquet and verify the table round-trips.

    Fix: duplicate parameter names (SyntaxError); locals ``writer``/``pf``
    were read by name but never bound — restored.
    """
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Round-trip a Dataset with an Image feature through parquet.

    Fix: duplicate parameter names (SyntaxError); locals restored.
    """
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    # NOTE(review): streaming flag inferred as True (iterable round-trip) —
    # the obfuscated source lost the literal; confirm against upstream.
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected" , [
        (Features({"foo": Value("int32" )} ), None),
        (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def test_get_writer_batch_size(feature, expected):
    """Row-group size selection depends on the feature types being written.

    Fix: duplicate parameter names (SyntaxError); names restored from the
    parametrize ids.
    """
    assert get_writer_batch_size(feature) == expected
| 59 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__magic_name__ : Optional[int] = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Placeholder test case (intentionally empty; slow tests live below)."""
    pass
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """GPU integration test for VersatileDiffusion image variation."""

    def test_inference_image_variations(self):
        # NOTE(review): method renamed to a test_* name so unittest/pytest
        # collect it; the obfuscated name would never run. Locals restored:
        # the original body read `image` and compared `image_slice` /
        # `expected_slice` that were never bound.
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        # Tolerate small numerical drift across GPU kernels.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
lowerCAmelCase_ = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
lowerCAmelCase_ = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    """Matthews correlation coefficient metric (thin wrapper over sklearn)."""

    # NOTE(review): methods renamed to the _info/_compute hooks that
    # datasets.Metric dispatches to; the obfuscated source gave both methods
    # the same name so the first was shadowed.
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                } ) , reference_urls=[
                '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
            ] , )

    def _compute(self, predictions, references, sample_weight=None):
        # Fix: duplicate parameter names (SyntaxError). sklearn expects
        # (y_true, y_pred), i.e. references first.
        return {
            "matthews_correlation": float(matthews_corrcoef(references , predictions , sample_weight=sample_weight ) ),
        }
| 60 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    """Shared assertions for a text Dataset loaded in the tests below.

    Fix: duplicate parameter names (SyntaxError) and the def name, which did
    not match the ``_check_text_dataset`` call sites below.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Single text file read honours ``keep_in_memory``.

    Fix: duplicate parameter names (SyntaxError); fixture names restored.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ] , )
def test_dataset_from_text_features(features, text_path, tmp_path):
    """Single text file read with an explicit ``features`` schema.

    Fix: duplicate parameter names (SyntaxError); fixture names restored.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    """Single text file read with every supported ``split`` argument.

    Fixes: duplicate parameter names (SyntaxError) and the final assert,
    which previously parsed as a vacuous conditional expression for
    ``split=None``.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """Read text data given either a single path or a list of paths.

    Fix: duplicate parameter names (SyntaxError); fixture names restored.
    """
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict loaded from text files.

    Fix: duplicate parameter names (SyntaxError) and the def name, which did
    not match the ``_check_text_datasetdict`` call sites below.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """DatasetDict text read honours ``keep_in_memory``.

    Fix: duplicate parameter names (SyntaxError); fixture names restored.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features" , [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """DatasetDict text read with an explicit ``features`` schema.

    Fix: duplicate parameter names (SyntaxError); fixture names restored.
    """
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """DatasetDict text read with a mapping of split name -> path.

    Fix: duplicate parameter names (SyntaxError) and the ``path`` local,
    which was read by name but never bound.
    """
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 672 | 0 |
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 quantum key-distribution protocol; return the key.

    key_len: desired key length in bits.
    seed: seed for both the basis RNG and the qiskit simulator.

    Fixes: duplicate parameter names (SyntaxError) and clobbered locals —
    the body read ``alice_state``/``bbaa_circ``/``gen_key`` etc. that were
    never bound. Renamed to ``bbaa`` to match the __main__ call below.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
    # Demo run with a fixed seed, then execute the module's doctests.
    print(F"""The generated key is : {bbaa(8, seed=0)}""")
    from doctest import testmod
    testmod()
| 61 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the Swin V2 model.
# Fixes: the import structure was bound to a clobbered name while
# `_import_structure` was read below (NameError); the modeling symbol list
# was built but never inserted into the structure; and the lazy module was
# assigned to a throwaway name instead of sys.modules[__name__].
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    # NOTE(review): submodule names aligned with the string keys above
    # ("configuration_swinv2"/"modeling_swinv2") — confirm the actual
    # file names in the package.
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# (old key prefix, new key prefix) pairs applied to every checkpoint key.
# Fix: all four module constants were bound to one clobbered name while the
# code below reads `rename_keys_prefix` and `ACCEPTABLE_CHECKPOINTS`.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# File names of the original VisualBERT checkpoints we know how to convert.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a checkpoint's state dict onto CPU.

    Fix: renamed to match the ``load_state_dict(...)`` call site in the
    converter below (the obfuscated def name was never referenced).
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Return a copy of state dict ``d`` with keys renamed for transformers.

    Fixes: duplicate parameter names (SyntaxError) and clobbered locals —
    the loop read ``new_key``/``new_d`` that were never bound, and
    ``return new_d`` referenced an unbound name. Renamed to match the
    ``get_new_dict(...)`` call site in the converter below.
    """
    new_d = OrderedDict()
    # Position ids are absent from the old checkpoints; recreate them.
    # NOTE(review): target key restored from upstream transformers — the
    # obfuscated source discarded this tensor; confirm.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT checkpoint to the transformers format.

    Fixes: clobbered locals — the body read ``model_type``/``config_params``/
    ``config``/``state_dict``/``new_state_dict``/``model`` that were never
    bound. Renamed to match the ``convert_visual_bert_checkpoint(...)`` call
    in the __main__ block below.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''

    # Get Config: pick config params and task head from the file name.
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''')
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict and rename keys to the transformers layout.
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fix: both locals were bound to one clobbered name while the code reads
    # `parser` and `args`; call target aligned with the converter function.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 62 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """Evaluate f(x) = x^2 - a (root at sqrt(a)).

    Fix: duplicate parameter names (SyntaxError); renamed to ``fx`` to match
    the Newton-iteration call site below.
    """
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """Derivative of f(x) = x^2 - a, i.e. 2x.

    Fix: the body read ``x`` but the parameter was a clobbered name; renamed
    to ``fx_derivative`` to match the call site below.
    """
    return 2 * x
def get_initial_point(a: float) -> float:
    """Return a starting guess > sqrt(a) by repeatedly squaring 2.0.

    Fix: the loop read and returned ``start`` while the assignment target
    was a clobbered name (NameError); renamed to match the call site below.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with Newton-Raphson iteration on f(x) = x^2 - a.

    Raises ValueError for negative input. Fixes: duplicate parameter names
    (SyntaxError) and clobbered locals ``value``/``prev_value``.
    """
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        # Newton step: x <- x - f(x)/f'(x).
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 672 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class a ( unittest.TestCase ):
    """Tests for BlipProcessor (tokenizer + image-processor wrapper).

    Fixes: every method in the obfuscated class shared one name, so only the
    last definition survived and unittest collected nothing; names restored
    to the setUp/tearDown/test_* conventions inferred from the bodies.
    Also: ``np.uinta`` is not a dtype (should be ``np.uint8``), and several
    argument placeholders (``do_normalize``, ``return_token_type_ids``,
    isinstance targets) were clobbered — restored from the imports above.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0 )

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor, BlipImageProcessor )

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor )

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="""np""" )
        input_processor = processor(images=image_input, return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor )

        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str, return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key] )

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor )

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input )
        self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok, decoded_processor )

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor )

        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ), ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 63 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: the logger was bound to a clobbered name while the classes below call
# `logger.warning` / `logger.info` (NameError).
logger = logging.get_logger(__name__)

# NOTE(review): map name assumed from the transformers convention; nothing
# in the visible code references it directly — confirm.
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the GIT vision encoder (CLIP-ViT style).

    Fixes: duplicate parameter names in ``__init__`` (SyntaxError); base
    class restored to the imported ``PretrainedConfig`` (the obfuscated base
    name was undefined); the model-type attribute renamed to ``model_type``
    because ``from_pretrained`` below reads ``cls.model_type``; class renamed
    to ``GitVisionConfig`` to match the reference inside GitConfig below.
    """

    model_type = '''git_vision_model'''

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # NOTE(review): method name restored from the standard transformers
        # config API — the obfuscated name was never referenced; confirm.
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the GIT model (text decoder + vision encoder).

    Fixes: duplicate parameter names in ``__init__`` (SyntaxError); base
    class restored to the imported ``PretrainedConfig``; clobbered locals in
    ``to_dict`` restored (the output dict was never built). Class/method
    names restored from the transformers convention — confirm upstream.
    """

    model_type = '''git'''

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize this config (and the nested vision config) to a dict."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 672 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Restored constant names: the class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# `logger`, but all four were bound to the same obfuscated name.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

# Map from model id to the hosted SentencePiece model file.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}

# Maximum sequence length per model id.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class _lowerCamelCase ( PreTrainedTokenizer ):
    """SentencePiece-based tokenizer for the GPT-SW3 models.

    NOTE(review): every method of this class was originally defined under a
    single obfuscated name, so only the last survived at runtime. Method names
    were restored from in-file call sites (`self.preprocess_text`,
    `self.vocab_size`, `self.calculate...`) and the `PreTrainedTokenizer`
    hook contract (`_tokenize`, `get_vocab`, `save_vocabulary`, ...).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model at ``vocab_file`` and resolve the
        model-dependent default special tokens."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            name_or_path = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt : off
        # NOTE(review): these entries render here as identical plain spaces; upstream
        # they were presumably distinct Unicode space characters — verify against source.
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )

    def __getstate__( self ) -> Union[str, Any]:
        # Drop the unpicklable SentencePiece processor; it is rebuilt in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) -> int:
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model )

    def preprocess_text( self , text ) -> str:
        """Strip non-printing characters, normalize whitespace, apply NFC."""
        text = self.non_printing_characters_re.sub('''''' , text )
        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''' , text )
        return text

    def _tokenize( self , text , **kwargs ) -> List[str]:
        """PreTrainedTokenizer hook: split preprocessed text into pieces."""
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index )

    @staticmethod
    def clean_up_tokenization( out_string ) -> str:
        """Returns the input unchanged — overrides the default clean-up."""
        return out_string

    def convert_tokens_to_string( self , tokens ) -> str:
        """Join tokens into a string, decoding sub-tokens between special tokens."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string

    def get_vocab( self ) -> Dict[str, int]:
        """Full token->id map, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def encode_fast( self , text , return_tensors = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode text (str or list of str) directly through SentencePiece,
        bypassing added-token handling; optionally return a torch tensor."""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids

    def decode_fast( self , token_ids ) -> str:
        """Decode ids straight through SentencePiece, skipping special-token logic."""
        return self.sp_model.decode(token_ids )

    def build_conversation_input_ids( self , conversation ) -> List[int]:
        """Serialize a ``Conversation`` into the GPT-SW3 chat prompt and encode it."""
        all_responses = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(all_responses ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt )
| 64 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
def shape_list ( tensor ):
    """Return the shape of `tensor` as a list, preferring static dims and
    falling back to dynamic `tf.shape` entries for unknown dims.

    Name restored from the call site in the layernorm helper below; the
    original compared against `tf.TensorShape(tensor)` instead of
    `tf.TensorShape(None)`, which is wrong for fully-unknown shapes.
    """
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        # Fully-unknown static shape: only the dynamic shape tensor is usable.
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax ( logits , axis = None , name = None ):
    """Numerically-robust wrapper around `tf.nn.softmax`.

    Adds a tiny epsilon to work around an XLA compilation issue; the offset
    is constant across the softmax axis so the result is unchanged.
    (Parameter names restored — the obfuscated def duplicated one name.)
    """
    return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def functional_layernorm ( inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
    """Apply layer normalization over `axis` using 1-D `weight`/`bias`.

    Parameter names restored from the HF TF utilities; the obfuscated def
    repeated a single parameter name (a SyntaxError).
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def flatten ( input , start_dim=0 , end_dim=-1 ):
    """Replicate `torch.flatten` in TF: collapse dims `start_dim..end_dim`
    (inclusive, negative indices allowed) into one dimension.

    Parameter names restored — the obfuscated def duplicated one name.
    """
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        # Nothing to collapse.
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
def invert_attention_mask ( encoder_attention_mask ):
    """Turn a 2-D/3-D attention mask into an additive, broadcastable 4-D mask
    where attended positions are 0 and masked positions are dtype-min.

    The obfuscated body referenced `encoder_attention_mask` without binding
    it to the parameter; names restored.
    """
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds ( tensor , embed_dim , tensor_name = "input_ids" ):
    """Assert every id in `tensor` is < `embed_dim` before an embedding lookup,
    with an actionable error message. Parameter names restored — the
    obfuscated def duplicated one name.
    """
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding '''
            f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ) , )
def save_attributes_to_hdf5_group ( group , name , data ):
    """Store `data` as attribute `name` on HDF5 `group`, chunking it when it
    exceeds the HDF5 object-header limit.

    Names restored: the obfuscated def duplicated a parameter name and never
    assigned `HDF5_OBJECT_HEADER_LIMIT`/`num_chunks`/`group.attrs` targets.
    """
    HDF5_OBJECT_HEADER_LIMIT = 6_45_12
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            f'''bytes: {bad_attributes}''' )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group ( group , name ):
    """Read attribute `name` from HDF5 `group`, reassembling chunked
    attributes (`name0`, `name1`, ...) written by the save helper.

    Names restored: the obfuscated def duplicated a parameter name and
    called `hasattr` on the wrong object.
    """
    if name in group.attrs:
        data = [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8" ) if hasattr(n , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d ( data ):
    """Expand every rank-1 tf.Tensor in a (possibly nested) structure to
    rank 2 by appending a trailing axis; everything else passes through.
    The obfuscated inner function referenced `t` without binding it.
    """
    def _expand_single_ad_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor , data )
| 672 | 0 |
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """Build a T5 model from `config_file`, load TF-checkpoint weights into it
    and save it as a PyTorch model.

    Name and parameter names restored from the call site at the bottom of this
    script; the obfuscated def repeated a single parameter name.
    """
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/config/output paths and run the conversion.
    # `parser`/`args` names restored — the obfuscated script bound both to one
    # name and then referenced the originals, which were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 65 |
'''simple docstring'''
__magic_name__ : int = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm ( equation ):
    """Evaluate a fully-parenthesized infix expression of single-digit
    operands using Dijkstra's two-stack algorithm.

    Name and local names restored from the `__main__` call site below; the
    obfuscated body referenced `equation`/`operand_stack`/`operator_stack`/
    `opr` without ever binding them.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i )
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push result
            opr = operator_stack.peek()
            operator_stack.pop()
            num_a = operand_stack.peek()
            operand_stack.pop()
            num_a = operand_stack.peek() if False else num_a  # placeholder removed below
            operand_stack.pop() if False else None
            # second operand
            num_b = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_b , num_a )
            operand_stack.push(total )
    # RULE 5: final result is the single remaining operand
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: the f-string below referenced `equation`, which the obfuscated
    # script had assigned to a different name.
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 672 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=DummyObject ):
    """Placeholder raising an informative error when torch/scipy are missing.

    Fixes restored from the dummy-object convention: the metaclass must be
    `DummyObject` (imported above) and it reads the `_backends` attribute;
    the two classmethods collided on one obfuscated name — restored as the
    standard `from_config`/`from_pretrained` pair (NOTE(review): names
    inferred from the usual dummy layout — verify against upstream).
    """
    _backends = ["torch", "scipy"]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'scipy'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'scipy'] )
| 66 |
'''simple docstring'''
from torch import nn
def snake_case_ ( act_fn ):
    """Map an activation-function name to the corresponding `torch.nn` module.

    The obfuscated body referenced `act_fn` without binding it to the
    parameter; the parameter name is restored.

    Raises:
        ValueError: if `act_fn` is not one of the supported names.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 672 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert gradients of two models are (not) in sync depending on `did_step`.

    Name and parameters restored from the call sites in this file; the
    obfuscated def duplicated a single parameter name.
    """
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    """One training step: forward, MSE loss, backward (manually or through
    the accelerator). Name/parameters restored from call sites in this file.
    """
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Scale down manually when gradient accumulation is handled by the caller.
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed for basic training: a reference model, a copy
    prepared through the accelerator, a dataloader, and optionally
    optimizer/scheduler pairs. Name/parameters restored from call sites.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1E-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1E-3)
        sched_lr = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched_lr, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """On a single device, `no_sync` must be a no-op: grads always stay in sync.
    Name/parameter restored from the call site in `main`."""
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """In a distributed setup, grads must diverge under `no_sync` and converge
    after a synced step. Name/parameter restored from the call site in `main`."""
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` should sync grads only on accumulation
    boundaries and at the end of the dataloader. Name/parameters restored
    from the call site in `main`."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Like `test_gradient_accumulation`, additionally checking that optimizer
    and LR scheduler stay aligned with the manually-stepped reference.
    Name/parameters restored from the call sites in `main`."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                # Mirror the per-process scheduler stepping the wrapper performs.
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """`GradientState.active_dataloader` must track the innermost prepared
    dataloader, including nested iteration, and clear afterwards.
    Name restored from the call site in `main`; the obfuscated version bound
    both dataloaders to colliding names."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Nested iteration must switch the active dataloader and switch back.
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """Dispatch the sync/accumulation tests appropriate for the current
    distributed configuration. Name restored from the call sites below."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ', F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<', '2.0') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ', '`split_batches=False`, `dispatch_batches=False`**', )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ', F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs); `index` is the process index and is
    unused. NOTE(review): name inferred from the xla_spawn convention."""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    # Stray dataset artifact (`| 67`) removed from the call below — it turned
    # `main()` into a bitwise-or expression that raised at runtime.
    main()
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Restored constant names: the A* classes below reference HEURISTIC, grid and
# delta, but the obfuscated script bound all of them to one name.
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Type alias for a (y, x) grid position.
TPosition = tuple[int, int]
class Node:
    """A search node holding position, goal, path cost and heuristic cost.

    Class/method names restored from in-file call sites (`Node(...)` in
    `AStar.__init__`, `self.calculate_heuristic()` below); the obfuscated
    `__init__` repeated a single parameter name.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self):
        """Manhattan distance when HEURISTIC == 1, else Euclidean distance."""
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other):
        # Ordering by total cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level `grid` using `delta` moves.

    Class/method names restored from in-file call sites
    (`AStar(...)`, `self.retrace_path`, `self.get_successors`).
    """

    def __init__(self, start, goal):
        # Positions are given as (y, x); Node takes (pos_x, pos_y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        """Run A*; return the path to the target, or [start] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        """Return in-bounds, non-obstacle neighbor nodes of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors

    def retrace_path(self, node):
        """Walk parent links back to the start and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class __SCREAMING_SNAKE_CASE :
    """Bidirectional A*: run one forward and one backward ``AStar`` instance
    in lockstep until their frontiers meet.

    Fixed here: both methods were mangled to ``UpperCamelCase`` (the second
    shadowed the first) and the search body read many undefined names
    (``current_fwd_node``, ``child_node``, ...).  Names are restored to what
    the ``__main__`` block expects.  ``AStar`` is expected to be defined
    earlier in the file — confirm its name when merging.
    """

    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Alternate one expansion per direction; return the joined path when
        the two current nodes coincide, else ``[self.fwd_astar.start.pos]``."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each direction now aims at the other's current node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (dropping the
        duplicated meeting node)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    # NOTE(review): ``grid`` and ``time`` are expected to be defined/imported
    # earlier in the file; the two search classes are mangled to
    # ``__SCREAMING_SNAKE_CASE`` above — restore their names when merging.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # Fixed here: the bidirectional search was never actually run before
    # timing — the previous code only constructed the object.
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 672 | 0 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__A = logging.get_logger(__name__)
class _A ( UpperCamelCase ):
    """Composite configuration holding an encoder and a decoder sub-config.

    NOTE(review): heavily mangled block — ``__init__`` reads ``kwargs`` while
    its real parameter is ``**__SCREAMING_SNAKE_CASE``; every pop is bound to
    the same throwaway ``__UpperCAmelCase`` local so ``encoder_config`` /
    ``decoder_config`` are undefined; ``self.encoder``/``self.decoder`` (read
    in ``to_dict``) are never assigned; and the classmethod signature repeats
    one parameter name, a SyntaxError.  Restore from the upstream source.
    """

    # Intended: ``model_type`` and ``is_composition`` class attributes.
    lowerCamelCase : Union[str, Any] = 'vision-encoder-decoder'
    lowerCamelCase : int = True

    def __init__( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : int ) -> Dict:
        # Requires both `encoder` and `decoder` sub-config dicts in kwargs.
        super().__init__(**__SCREAMING_SNAKE_CASE )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'''A configuraton of type {self.model_type} cannot be instantiated because '''
                f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        __UpperCAmelCase =kwargs.pop("""encoder""" )
        __UpperCAmelCase =encoder_config.pop("""model_type""" )
        __UpperCAmelCase =kwargs.pop("""decoder""" )
        __UpperCAmelCase =decoder_config.pop("""model_type""" )
        __UpperCAmelCase =AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase =AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase =True

    @classmethod
    def _a ( cls : List[str] , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : str ) -> PretrainedConfig:
        # Alternate constructor: build from two already-instantiated configs,
        # forcing the decoder into cross-attention mode first.
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        __UpperCAmelCase =True
        __UpperCAmelCase =True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__SCREAMING_SNAKE_CASE )

    def _a ( self : Union[str, Any] ) -> Tuple:
        # Serialize, expanding the nested encoder/decoder configs.
        __UpperCAmelCase =copy.deepcopy(self.__dict__ )
        __UpperCAmelCase =self.encoder.to_dict()
        __UpperCAmelCase =self.decoder.to_dict()
        __UpperCAmelCase =self.__class__.model_type
        return output
class _A ( UpperCamelCase ):
    """ONNX export configuration for the vision-encoder side.

    NOTE(review): all three properties below share the mangled name ``_a``,
    so the later definitions shadow the earlier ones and only the last one
    survives on the class — presumably ``inputs`` / ``atol_for_validation`` /
    ``outputs`` upstream.
    """

    # Minimum framework version for export (attribute name mangled).
    lowerCamelCase : List[Any] = version.parse('1.11' )

    @property
    def _a ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis spec for the image input tensor.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def _a ( self : Any ) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-4

    @property
    def _a ( self : str ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis spec for the encoder output.
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( UpperCamelCase ):
    """ONNX export configuration for the text-decoder side.

    NOTE(review): mangled block — both methods are named ``_a`` (the second
    shadows the first); in ``generate_dummy_inputs`` every assignment binds
    to the throwaway ``__UpperCAmelCase`` so the later reads of
    ``dummy_input``/``common_inputs`` are undefined; and the second method's
    signature repeats ``__SCREAMING_SNAKE_CASE``, a SyntaxError.  Restore
    from the upstream source before use.
    """

    @property
    def _a ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Decoder inputs with dynamic batch/sequence axes.
        __UpperCAmelCase =OrderedDict()
        __UpperCAmelCase ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
        __UpperCAmelCase ={0: """batch""", 1: """past_decoder_sequence + sequence"""}
        __UpperCAmelCase ={0: """batch""", 1: """encoder_sequence"""}
        return common_inputs

    def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        # Build dummy decoder inputs from the tokenizer's dummies, replacing
        # input ids with zeroed encoder hidden-states of the right shape.
        import torch
        __UpperCAmelCase =OrderedDict()
        __UpperCAmelCase =super().generate_dummy_inputs(
            __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE )
        __UpperCAmelCase , __UpperCAmelCase =dummy_input["""input_ids"""].shape
        __UpperCAmelCase =(batch, encoder_sequence, self._config.encoder_hidden_size)
        __UpperCAmelCase =dummy_input.pop("""input_ids""" )
        __UpperCAmelCase =dummy_input.pop("""attention_mask""" )
        __UpperCAmelCase =torch.zeros(__SCREAMING_SNAKE_CASE )
        return common_inputs
class _A ( UpperCamelCase ):
    """Top-level ONNX config that dispatches to the encoder/decoder configs.

    NOTE(review): mangled block — ``VisionEncoderDecoderEncoderOnnxConfig``
    and ``VisionEncoderDecoderDecoderOnnxConfig`` are not defined under those
    names here (the classes above were renamed ``_A``); the last method's
    signature repeats ``__SCREAMING_SNAKE_CASE`` (a SyntaxError) and binds
    ``encoder_config.hidden_size`` to a throwaway local instead of passing it
    on.  All three methods share the name ``_a`` and shadow one another.
    """

    @property
    def _a ( self : List[Any] ) -> None:
        # The composite model exposes no direct inputs of its own.
        pass

    def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : PretrainedConfig ) -> OnnxConfig:
        # Sub-config factory for the encoder export.
        return VisionEncoderDecoderEncoderOnnxConfig(__SCREAMING_SNAKE_CASE )

    def _a ( self : Dict , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" ) -> OnnxConfig:
        # Sub-config factory for the decoder export.
        __UpperCAmelCase =encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 68 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the Nezha model family: config symbols are
# always importable, the torch model classes only when torch is available.
# NOTE(review): mangled block — the import-structure dict and the model list
# are both bound to ``__magic_name__`` (the second assignment shadows the
# first), yet ``_LazyModule`` below is passed ``_import_structure``, which is
# never defined here.  Restore the original names before use.
__magic_name__ : int = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the model classes lazily as well.
    __magic_name__ : List[Any] = [
        """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NezhaForNextSentencePrediction""",
        """NezhaForMaskedLM""",
        """NezhaForPreTraining""",
        """NezhaForMultipleChoice""",
        """NezhaForQuestionAnswering""",
        """NezhaForSequenceClassification""",
        """NezhaForTokenClassification""",
        """NezhaModel""",
        """NezhaPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    # At runtime the module is replaced by a lazy proxy.
    import sys

    __magic_name__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
from __future__ import annotations
a : Optional[Any] = '''Muhammad Umer Farooq'''
a : int = '''MIT'''
a : Dict = '''1.0.0'''
a : Optional[int] = '''Muhammad Umer Farooq'''
a : Optional[Any] = '''contact@muhammadumerfarooq.me'''
a : Union[str, Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
    """HTML parser that records absolute hrefs found in anchor tags.

    NOTE(review): mangled block — the parent should presumably be
    ``HTMLParser`` (here an undefined ``_UpperCamelCase``); ``__init__``
    binds its state to a throwaway ``__snake_case`` local instead of
    ``self.urls``/``self.domain`` (and reads an undefined ``domain``); and
    the handler's signature repeats the parameter name ``a_`` — a
    SyntaxError — while its body reads undefined ``tag``/``attrs``.
    Restore from the upstream source before use.
    """

    def __init__( self : Optional[Any] , a_ : str ):
        """Remember the crawl domain and start with an empty URL list."""
        super().__init__()
        __snake_case = []
        __snake_case = domain

    def A ( self : str , a_ : str , a_ : list[tuple[str, str | None]] ):
        """Intended to be ``handle_starttag``: collect absolute links from <a href=...>."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        __snake_case = parse.urljoin(self.domain , a_ )
                        self.urls.append(a_ )
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
    """Return the registered domain (last two dot-separated labels) of a URL.

    NOTE(review): calls ``get_sub_domain_name``, which is not defined under
    that name in this mangled file (the helper below is also renamed
    ``__UpperCAmelCase`` and shadows this very function).
    """
    return ".".join(get_sub_domain_name(_UpperCAmelCase ).split("." )[-2:] )
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
    """Return the network-location (host[:port]) part of a URL.

    NOTE(review): this redefinition shadows the function directly above,
    which shares the same mangled name.
    """
    return parse.urlparse(_UpperCAmelCase ).netloc
def __UpperCAmelCase ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]:
    """Crawl the given URL, follow its <a href> links, and collect e-mail
    addresses on the target domain.

    NOTE(review): mangled block — every local is bound to ``__snake_case``,
    so the later reads of ``domain``/``parser``/``r``/``read``/``emails``/
    ``valid_emails`` are undefined, and ``get_domain_name``/``Parser`` do not
    exist under those names here.  The broad ``except ValueError`` handlers
    also swallow/convert errors silently — confirm against upstream.
    """
    __snake_case = get_domain_name(_UpperCAmelCase )

    # Initialize the parser
    __snake_case = Parser(_UpperCAmelCase )

    try:
        # Open URL
        __snake_case = requests.get(_UpperCAmelCase )

        # pass the raw HTML to the parser to get links
        parser.feed(r.text )

        # Get links and loop through
        __snake_case = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                __snake_case = requests.get(_UpperCAmelCase )
                # Get the valid email.
                __snake_case = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(_UpperCAmelCase )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(_UpperCAmelCase )
if __name__ == "__main__":
    # Demo: crawl github.com and print any e-mail addresses found.
    # NOTE(review): ``emails_from_url`` is not defined under that name in this
    # mangled file (the crawler above is ``__UpperCAmelCase``), and the result
    # is bound to ``a`` while the prints read ``emails`` — NameError as written.
    a : Any = emails_from_url('''https://github.com''')
    print(F'''{len(emails)} emails found:''')
    print('''\n'''.join(sorted(emails)))
| 69 |
'''simple docstring'''
import string
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    '''Encode/decode a string with the Atbash cipher, one character at a time.

    A-Z maps to Z-A and a-z to z-a; every other character passes through
    unchanged (Atbash is its own inverse).

    Fixed here: the loop previously iterated an undefined name ``sequence``
    and compared an undefined ``extract`` — the mangled locals were all bound
    to ``_snake_case``.
    '''
    output = ""
    for character in SCREAMING_SNAKE_CASE__:
        code = ord(character )
        if 65 <= code <= 90:
            # Uppercase: ord('A') + ord('Z') == 155, so 155 - code mirrors the range.
            output += chr(1_55 - code )
        elif 97 <= code <= 1_22:
            # Lowercase: ord('a') + ord('z') == 219.
            output += chr(2_19 - code )
        else:
            output += character
    return output
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    '''Atbash cipher via a lookup into the reversed ASCII alphabets.

    Non-letter characters pass through unchanged.

    Fixed here: the generator expression previously iterated an undefined
    name ``sequence``; it now iterates the actual parameter.
    '''
    letters = string.ascii_letters
    # Reversed alphabets, lowercase block first to line up with ascii_letters.
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in SCREAMING_SNAKE_CASE__ )
def snake_case_ ( ):
    '''Benchmark the two Atbash implementations with ``timeit``.

    NOTE(review): mangled block — the setup string imports ``atbash`` /
    ``atbash_slow`` from ``__main__``, but both functions above are renamed
    ``snake_case_`` in this file, and the ``setup=`` arguments reference an
    undefined ``SCREAMING_SNAKE_CASE__`` (the setup string is bound to
    ``_snake_case``).  This fails at runtime as written — confirm against
    upstream.
    '''
    from timeit import timeit

    print("Running performance benchmarks..." )
    _snake_case = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=SCREAMING_SNAKE_CASE__ )} seconds''' )
    print(f'''> atbash(): {timeit("atbash(printable)" , setup=SCREAMING_SNAKE_CASE__ )} seconds''' )
if __name__ == "__main__":
    # Demo plus benchmark entry point.
    # NOTE(review): ``atbash`` and ``benchmark`` are not defined under those
    # names in this mangled file (all three functions are ``snake_case_``),
    # so this block raises NameError as written.
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F'{example} encrypted in atbash: {atbash(example)}')
    benchmark()
| 672 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : bool = True , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : float = math.inf , lowercase : float = -math.inf , lowercase : bool = False , lowercase : float = 1_00 , lowercase : float = 0.01 , lowercase : float = 1 , ):
'''simple docstring'''
lowerCamelCase_ = False
lowerCamelCase_ = search_prob
lowerCamelCase_ = start_temperate
lowerCamelCase_ = []
lowerCamelCase_ = 0
lowerCamelCase_ = None
while not search_end:
lowerCamelCase_ = current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ = current_state
scores.append(lowercase )
iterations += 1
lowerCamelCase_ = None
lowerCamelCase_ = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase_ = random.randint(0 , len(lowercase ) - 1 ) # picking a random neighbor
lowerCamelCase_ = neighbors.pop(lowercase )
lowerCamelCase_ = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ = picked_neighbor
else:
lowerCamelCase_ = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ = picked_neighbor
lowerCamelCase_ = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ = True
else:
lowerCamelCase_ = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowercase ) , lowercase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
    # Demo driver for the annealer on two toy objectives.
    # NOTE(review): mangled block — both test functions are named
    # ``_SCREAMING_SNAKE_CASE`` with duplicated parameter names (a
    # SyntaxError) while their bodies read undefined ``x``/``y``; the calls
    # below reference ``simulated_annealing`` / ``prob`` / ``local_min``,
    # none of which exist under those names here (results are bound to
    # ``lowerCamelCase``).  Restore from upstream before running.

    def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : Optional[Any] ):
        '''Toy objective: f(x, y) = x^2 + y^2.'''
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    lowerCamelCase : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    lowerCamelCase : Union[str, Any] = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    # starting the problem with initial coordinates (12, 47)
    lowerCamelCase : Optional[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    lowerCamelCase : Optional[int] = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
    )

    def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Dict ):
        '''Toy objective: f(x, y) = 3x^2 - 6y.'''
        return (3 * x**2) - (6 * y)

    lowerCamelCase : str = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    lowerCamelCase : Optional[int] = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        F"""{local_min.score()}"""
    )

    lowerCamelCase : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    lowerCamelCase : str = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        F"""{local_min.score()}"""
    )
| 70 |
'''simple docstring'''
import numpy as np
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    '''Elementwise logistic sigmoid, 1 / (1 + e^-x), of a numpy array.

    Fixed here: the body previously read an undefined name ``vector`` instead
    of the (mangled) parameter, raising NameError on every call.
    '''
    vector = SCREAMING_SNAKE_CASE__
    return 1 / (1 + np.exp(-vector ))
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    '''Sigmoid-linear unit (GELU approximation): x * sigmoid(1.702 * x).

    Fixed here: the body previously read undefined names ``vector`` and
    ``sigmoid`` (the sibling sigmoid helper is also mangled to
    ``snake_case_``), so the sigmoid is inlined to keep this block
    self-contained while computing the identical expression.
    '''
    vector = SCREAMING_SNAKE_CASE__
    return vector * (1 / (1 + np.exp(-1.702 * vector )))
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 672 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
    """Deprecated alias kept for backward compatibility: warns and defers
    everything to the parent image-processor class.

    NOTE(review): mangled block — the parent should presumably be
    ``SegformerImageProcessor`` (imported above, here an undefined
    ``__SCREAMING_SNAKE_CASE``); ``*_snake_case`` and ``**_snake_case`` share
    one name, which is a SyntaxError; and the second argument to
    ``warnings.warn`` (the warning category, ``FutureWarning`` upstream) is
    that mangled name.  Restore from upstream before use.
    """

    def __init__( self ,*_snake_case ,**_snake_case ):
        # Emit the deprecation warning, then construct the parent normally.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead." ,_snake_case ,)
        super().__init__(*_snake_case ,**_snake_case )
| 71 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# End-to-end SageMaker single-node training test, parameterized over a
# PyTorch and a TensorFlow text-classification run.  Only executed when the
# TEST_SAGEMAKER environment variable is truthy (release-time gate).
# NOTE(review): mangled class — all three methods share the name
# ``UpperCamelCase`` (later definitions shadow earlier ones), and
# ``lowerCamelCase`` used as argument values below is undefined (upstream
# passes booleans such as ``check=True`` / ``debugger_hook_config=False``).
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
        },
        {
            '''framework''': '''tensorflow''',
            '''script''': '''run_tf.py''',
            '''model_name_or_path''': '''distilbert-base-cased''',
            '''instance_type''': '''ml.g4dn.xlarge''',
            '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
        },
    ] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Runs one HuggingFace estimator per framework and checks runtime/KPIs."""

    def UpperCamelCase( self ):
        # Setup: copy the example training script into the test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=lowerCamelCase , )
        assert hasattr(self , "env" )

    def UpperCamelCase( self , lowerCamelCase=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )

    def UpperCamelCase( self , lowerCamelCase ):
        # Export the training job's metric history as CSV for inspection.
        TrainingJobAnalytics(lowerCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )

    def UpperCamelCase( self ):
        # create estimator
        _snake_case = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        _snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        _snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        _snake_case = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowerCamelCase )
| 672 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCamelCase ( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    '''Minimax over an implicit complete binary tree of leaf ``scores``.

    ``depth``/``node_index`` locate the current node, ``is_max`` says whose
    turn it is, and ``height`` (= log2(len(scores))) marks the leaf level.

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.

    Fixed here: the mangled signature repeated one parameter name (a
    SyntaxError) while the body read the original names, and the recursive
    calls targeted an undefined ``minimax`` — both now use this function's
    own name/parameters, alternating ``is_max`` at each level.
    '''
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        # Leaf level: return the score directly.
        return scores[node_index]
    if is_max:
        return max(
            UpperCamelCase(depth + 1 , node_index * 2 , False , scores , height ) ,
            UpperCamelCase(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        UpperCamelCase(depth + 1 , node_index * 2 , True , scores , height ) ,
        UpperCamelCase(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase =[9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
lowercase =math.log(len(lowercase_ ) , 2 )
print('''Optimal value : ''' , end='''''' )
print(minimax(0 , 0 , lowercase_ , lowercase_ , lowercase_ ) )
if __name__ == "__main__":
    # Run doctests, then the demo driver.
    import doctest
    doctest.testmod()
    # NOTE(review): ``main`` is not defined in this file (the driver above is
    # mangled to ``UpperCamelCase``) — this call raises NameError as written.
    main()
| 72 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    """Tokenizer test suite for DistilBERT, reusing the BERT tokenization tests.

    NOTE(review): mangled block — the base class should presumably be
    ``BertTokenizationTest`` (imported above, here an undefined
    ``__UpperCamelCase``); the three class-attribute names are lost
    (upstream: ``tokenizer_class`` / ``rust_tokenizer_class`` /
    ``test_rust_tokenizer``); ``lowerCamelCase`` used as argument values is
    undefined; and the asserts read ``tokenizer``/``text``/``text_a``, which
    are never bound (results go to ``_snake_case``).
    """

    UpperCAmelCase__ : int = DistilBertTokenizer
    UpperCAmelCase__ : Union[str, Any] = DistilBertTokenizerFast
    UpperCAmelCase__ : List[str] = True

    @slow
    def UpperCamelCase( self ):
        # Round-trip special-token handling against the canonical checkpoint.
        _snake_case = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        _snake_case = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 672 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Optional[int] = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class _snake_case ( A__ ):
    """Model configuration for Wav2Vec2 (feature extractor + transformer +
    optional quantizer/adapter heads).

    NOTE(review): mangled block — the ``__init__`` signature names every
    parameter ``a`` (duplicate argument names are a SyntaxError) while the
    body reads the original parameter names, and every assignment binds to a
    bare ``SCREAMING_SNAKE_CASE`` local instead of the ``self.*`` attributes
    that the validation below (``self.conv_dim`` etc.) and the final property
    (``self.conv_stride``) rely on.  Restore from the upstream source.
    """

    _lowercase : Union[str, Any] = '''wav2vec2'''

    def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="sum" , a=False , a=False , a=256 , a=(512, 512, 512, 512, 1500) , a=(5, 3, 3, 1, 1) , a=(1, 2, 3, 1, 1) , a=512 , a=0 , a=1 , a=2 , a=False , a=3 , a=2 , a=3 , a=None , a=None , **a , ) -> Tuple:
        super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
        # Core transformer / feature-extractor hyperparameters.
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = feat_extract_norm
        SCREAMING_SNAKE_CASE = feat_extract_activation
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = conv_bias
        SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
        SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
        SCREAMING_SNAKE_CASE = len(self.conv_dim)
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = hidden_dropout
        SCREAMING_SNAKE_CASE = attention_dropout
        SCREAMING_SNAKE_CASE = activation_dropout
        SCREAMING_SNAKE_CASE = feat_proj_dropout
        SCREAMING_SNAKE_CASE = final_dropout
        SCREAMING_SNAKE_CASE = layerdrop
        SCREAMING_SNAKE_CASE = layer_norm_eps
        SCREAMING_SNAKE_CASE = initializer_range
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = do_stable_layer_norm
        SCREAMING_SNAKE_CASE = use_weighted_layer_sum

        # The three convolutional spec lists must agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        SCREAMING_SNAKE_CASE = apply_spec_augment
        SCREAMING_SNAKE_CASE = mask_time_prob
        SCREAMING_SNAKE_CASE = mask_time_length
        SCREAMING_SNAKE_CASE = mask_time_min_masks
        SCREAMING_SNAKE_CASE = mask_feature_prob
        SCREAMING_SNAKE_CASE = mask_feature_length
        SCREAMING_SNAKE_CASE = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        SCREAMING_SNAKE_CASE = num_codevectors_per_group
        SCREAMING_SNAKE_CASE = num_codevector_groups
        SCREAMING_SNAKE_CASE = contrastive_logits_temperature
        SCREAMING_SNAKE_CASE = feat_quantizer_dropout
        SCREAMING_SNAKE_CASE = num_negatives
        SCREAMING_SNAKE_CASE = codevector_dim
        SCREAMING_SNAKE_CASE = proj_codevector_dim
        SCREAMING_SNAKE_CASE = diversity_loss_weight

        # ctc loss
        SCREAMING_SNAKE_CASE = ctc_loss_reduction
        SCREAMING_SNAKE_CASE = ctc_zero_infinity

        # adapter
        SCREAMING_SNAKE_CASE = add_adapter
        SCREAMING_SNAKE_CASE = adapter_kernel_size
        SCREAMING_SNAKE_CASE = adapter_stride
        SCREAMING_SNAKE_CASE = num_adapter_layers
        SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size
        SCREAMING_SNAKE_CASE = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        SCREAMING_SNAKE_CASE = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = list(a)
        SCREAMING_SNAKE_CASE = xvector_output_dim

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
        # Overall downsampling factor of the convolutional feature extractor.
        return functools.reduce(operator.mul , self.conv_stride , 1)
| 73 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for SwiftFormer: config symbols are always
# importable, the torch model classes only when torch is available.
# NOTE(review): mangled block — both import-structure containers are bound
# to ``__magic_name__`` (shadowing), yet ``_LazyModule`` below is passed
# ``_import_structure``, which is never defined here.  Restore the original
# names before use.
__magic_name__ : Optional[int] = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the model classes lazily as well.
    __magic_name__ : Optional[int] = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    # At runtime the module is replaced by a lazy proxy.
    import sys

    __magic_name__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowercase_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
    def __init__( self : str , *_A : Optional[int] , _A : Optional[int]=None , _A : Dict=None , _A : List[Any]=None , **_A : Optional[Any] ):
        """Trainer specialized for quantization-aware QA fine-tuning.

        NOTE(review): mangled — the signature repeats the parameter name
        ``_A`` (a SyntaxError), the body reads undefined names
        (``eval_examples`` etc.), and the assignments bind to a throwaway
        ``__SCREAMING_SNAKE_CASE`` local instead of ``self.eval_examples`` /
        ``self.post_process_function`` / ``self.quant_trainer_args`` /
        ``self.calib_num``, which the other methods read.
        """
        super().__init__(*_A , **_A )
        __SCREAMING_SNAKE_CASE : List[str] = eval_examples
        __SCREAMING_SNAKE_CASE : Optional[Any] = post_process_function
        __SCREAMING_SNAKE_CASE : Tuple = quant_trainer_args
        __SCREAMING_SNAKE_CASE : int = 128 # default number of calibration samples
    def UpperCAmelCase__ ( self : int , _A : Optional[int]=None ):
        """Build a DataLoader over the calibration dataset (eval batch size).

        NOTE(review): mangled — the body reads ``calib_dataset`` while the
        parameter is ``_A``, the intermediate results are bound to throwaway
        ``__SCREAMING_SNAKE_CASE`` locals, and ``shuffle=_A`` passes the
        dataset itself where upstream passes a boolean.
        """
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
        __SCREAMING_SNAKE_CASE : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset

        __SCREAMING_SNAKE_CASE : str = self._remove_unused_columns(_A , description='''Calibration''' )
        return DataLoader(
            _A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_A , )
    def UpperCAmelCase__ ( self : Optional[int] , _A : Tuple=None ):
        """Run post-training-quantization calibration: stream batches through
        the model with quantizer calibration enabled, then finalize ranges.

        NOTE(review): mangled locals — every result is bound to
        ``__SCREAMING_SNAKE_CASE``, so the later reads of
        ``calib_dataloader``/``model`` refer to undefined names, and the
        calibrated model is never stored back on ``self``.
        """
        __SCREAMING_SNAKE_CASE : List[str] = self.train_dataset if calib_dataset is None else calib_dataset
        __SCREAMING_SNAKE_CASE : Tuple = self.get_calib_dataloader(_A )
        __SCREAMING_SNAKE_CASE : Optional[Any] = self.model
        quant_trainer.configure_model(_A , self.quant_trainer_args , calib=_A )
        model.eval()
        quant_trainer.enable_calibration(_A )

        logger.info('''***** Running calibration *****''' )
        logger.info(F''' Num examples = {self.calib_num}''' )
        logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
        for step, inputs in enumerate(_A ):
            # Prediction step
            __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.prediction_step(_A , _A , prediction_loss_only=_A )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                # Stop once enough calibration samples have been seen.
                break

        quant_trainer.finish_calibration(_A , self.quant_trainer_args )
        __SCREAMING_SNAKE_CASE : Dict = model
    def UpperCAmelCase__ ( self : List[str] , _A : int=None , _A : Optional[Any]=None , _A : List[Any]=None , _A : str = "eval" ):
        """Evaluation with QA post-processing: run the prediction loop with
        metric computation disabled, post-process the raw predictions, then
        compute and log prefixed metrics.

        NOTE(review): mangled — the signature repeats ``_A`` four times (a
        SyntaxError) and the body reads names (``eval_dataset``,
        ``eval_examples``, ``compute_metrics``, ``output``, ``metrics``,
        ``metric_key_prefix``...) whose values were bound to throwaway
        ``__SCREAMING_SNAKE_CASE`` locals.
        """
        __SCREAMING_SNAKE_CASE : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
        __SCREAMING_SNAKE_CASE : str = self.get_eval_dataloader(_A )
        __SCREAMING_SNAKE_CASE : List[Any] = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        __SCREAMING_SNAKE_CASE : List[str] = self.compute_metrics
        __SCREAMING_SNAKE_CASE : Union[str, Any] = None
        __SCREAMING_SNAKE_CASE : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            __SCREAMING_SNAKE_CASE : Optional[int] = eval_loop(
                _A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
        finally:
            # Always restore the user's compute_metrics callback.
            __SCREAMING_SNAKE_CASE : List[str] = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            __SCREAMING_SNAKE_CASE : Dict = self.post_process_function(_A , _A , output.predictions )
            __SCREAMING_SNAKE_CASE : str = self.compute_metrics(_A )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    __SCREAMING_SNAKE_CASE : Tuple = metrics.pop(_A )

            self.log(_A )
        else:
            __SCREAMING_SNAKE_CASE : str = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        __SCREAMING_SNAKE_CASE : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
        return metrics
def UpperCAmelCase__ ( self : Tuple , _A : int , _A : Optional[int] , _A : str=None , _A : str = "test" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
__SCREAMING_SNAKE_CASE : str = self.compute_metrics
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__SCREAMING_SNAKE_CASE : Union[str, Any] = eval_loop(
_A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
__SCREAMING_SNAKE_CASE : Tuple = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__SCREAMING_SNAKE_CASE : Dict = self.post_process_function(_A , _A , output.predictions , '''predict''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__SCREAMING_SNAKE_CASE : List[str] = metrics.pop(_A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
def UpperCAmelCase__ ( self : Dict , _A : List[Any]="./" ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.eval_dataset
__SCREAMING_SNAKE_CASE : int = self.get_eval_dataloader(_A )
__SCREAMING_SNAKE_CASE : str = next(iter(_A ) )
# saving device - to make it consistent
__SCREAMING_SNAKE_CASE : str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
__SCREAMING_SNAKE_CASE : List[Any] = tuple(v.to(_A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
__SCREAMING_SNAKE_CASE : Dict = True
__SCREAMING_SNAKE_CASE : Dict = self.model.to(_A )
model.eval()
model.float()
__SCREAMING_SNAKE_CASE : str = model.module if hasattr(_A , '''module''' ) else model
quant_trainer.configure_model(_A , self.quant_trainer_args )
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
__SCREAMING_SNAKE_CASE : List[str] = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
_A , _A , _A , export_params=_A , opset_version=13 , do_constant_folding=_A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=_A , )
logger.info('''onnx export finished''' )
| 74 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; named `logger` by convention so the script can use it.
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build (old_key, new_key) rename pairs mapping original DiT/BEiT checkpoint
    keys to HuggingFace `Beit*` model keys.

    Args:
        config: model config; only `num_hidden_layers` is read here.
        has_lm_head: True for masked-image-modeling checkpoints (mask token +
            final layernorm) instead of a classification head.
        is_semantic: True when checkpoint keys carry a "backbone." prefix.

    Returns:
        list of (source_key, destination_key) tuples.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value
    entries under the HuggingFace naming scheme, mutating `state_dict` in place.

    Note: the original checkpoint has no key bias, so only q_bias and v_bias exist.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_a_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_a_a
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`, removing `old` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO test image (two cats) used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original DiT checkpoint, convert its weights to the HuggingFace
    BEiT layout, sanity-check the forward pass, and save (optionally push) the model.
    """
    # rvlcdip checkpoints are fine-tuned classifiers; the others keep the MIM head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 672 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: the heavy torch-dependent modules are only imported
# on first attribute access via `_LazyModule` below.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 75 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime.

    Handles 0, 1, 2, 3 and even numbers explicitly, then trial-divides by odd
    numbers up to sqrt(number).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or after `value * factor`, searching upward
    (or downward when called with desc=True).

    If the starting point is itself prime, recurse from the next integer so
    the result is always a *different* prime.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 672 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny random RoFormer config plus matching inputs for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        # Random token ids plus (optionally) attention mask and token type ids.
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the shared Flax model-tester suite against every RoFormer head."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: each head loads from the PyTorch checkpoint and runs a forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        """Compare masked-LM logits of the released checkpoint against recorded values."""
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # Reference slice recorded from the original checkpoint.
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 76 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the pix2struct sub-package: heavy vision/torch
# modules are only imported on first attribute access via `_LazyModule`.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 0 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` set to 1.

    >>> set_bit(0b1101, 1)
    15
    """
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` cleared to 0.

    >>> clear_bit(0b10010, 1)
    16
    """
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Return `number` with the bit at `position` toggled.

    >>> flip_bit(0b11111, 1)
    29
    """
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1.

    >>> is_bit_set(0b1010, 1)
    True
    """
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`.

    >>> get_bit(0b11010, 1)
    1
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 77 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """Image processor applying: resize (shortest edge) -> center crop -> rescale -> normalize.

    Defaults: shortest edge 256, crop 224x224, scale 1/255, ImageNet standard mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        # default_to_square=False keeps a shortest-edge size dict as-is.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the shortest edge equals `size["shortest_edge"]`, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Calls the module-level `resize` transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop `image` to `size` (keys "height"/"width")."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a batch.

        Per-call arguments override the instance defaults; returns a `BatchFeature`
        with key "pixel_values".
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 672 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __A(unittest.TestCase):
    """Checks that `get_activation` returns the expected torch modules with sane outputs."""

    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        # Saturates to 0 for very negative inputs, passes large positives through.
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 78 |
"""Ascii85 (base85) encode/decode helpers built on the standard library."""
import base64


def ascii85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to its Ascii85 representation.

    >>> ascii85_decode(ascii85_encode("Hello"))
    'Hello'
    """
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 672 | 0 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Names of the files that make up a saved vocabulary.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class UpperCAmelCase_(PreTrainedTokenizer):
    """Character-level tokenizer (MGP-STR style): every character is one token,
    looked up in a json vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping id -> token, used for decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        # Base vocab plus any tokens added after loading.
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split `text` into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        # Unknown characters fall back to the unk token's id.
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary json into `save_directory`; returns the file path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 79 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 672 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector):
    """Element-wise rectified linear unit: max(0, x) for each entry of *vector*.

    Accepts anything ``np.maximum`` can broadcast against 0 (list, ndarray);
    returns an ndarray. Renamed from the scrambled ``snake_case`` so the
    demo below (which calls ``relu``) actually resolves.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam-based builder with a single string "content" column.

    Renamed from the scrambled placeholder so the test class below, which
    instantiates ``DummyBeamDataset``, resolves. Method/parameter names
    restored to the ``BeamBasedBuilder`` hook contract (the scrambled
    duplicate parameter names were a SyntaxError).
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        # Feed the in-memory examples straight into the pipeline.
        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder with a nested feature ``{"a": Sequence({"b": string})}``.

    Renamed from the scrambled placeholder (which collided with the class
    above) so ``NestedBeamDataset`` used by the tests resolves.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return 3 ``(key, example)`` pairs with a flat string "content" field.

    Renamed from the scrambled ``snake_case_`` — the builders and tests in
    this file call ``get_test_dummy_examples``.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return 3 ``(key, example)`` pairs with a nested ``{"a": {"b": [content]}}`` payload.

    Renamed from the scrambled ``snake_case_`` — ``NestedBeamDataset`` and the
    tests call ``get_test_nested_examples``.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """Integration tests for Beam-based builders run with the local DirectRunner.

    Restores the undefined scrambled names (``builder``, ``dset``,
    ``expected_num_examples``, ``tmp_cache_dir``) and fixes the sharded test,
    which previously asserted the existence of shard 00000 twice instead of
    checking shard 00001.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards so the sharded-file naming path is exercised.
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Fixed: the original asserted shard 00000 twice; the second shard is 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Preparing without a beam_runner must fail explicitly, not silently.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 672 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
# Only show warnings and above from the transformers logger during conversion.
logging.set_verbosity_warning()

# NOTE(review): presumably the JSON indent width for the vocab/config files
# written below (upstream names this `json_indent`) — confirm before renaming.
_snake_case : Optional[int] = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style).

    Renamed from the scrambled ``a`` — the conversion function below calls
    ``Dictionary.load``. Method names and locals restored: the scrambled
    version name-mangled its methods (``__snake_case``) while calling
    ``self.add_symbol``, and referenced undefined locals (``f``, ``idx``).
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        # Number of special symbols registered before any real vocabulary entry.
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range indices fall back to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a text file of ``<symbol> <count>`` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word (or bump its count by *n*) and return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # No metadata header in this format; vocabulary entries start at line 0.
        return 0

    def add_from_file(self, f):
        """Load entries from *f*, a file path or an open file object."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def lowerCAmelCase_ ( __lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__snake_case : List[Any] = dict((re.sub(R"@@$" , "" , __lowerCamelCase ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , __lowerCamelCase ), v) for k, v in d.items() )
__snake_case : str = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__snake_case : Optional[int] = d[k] # restore
return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint dump into a HF Transformers model directory.

    Expects ``checkpoint.pt``, ``dict.txt`` and ``bpecodes`` inside
    *biogpt_checkpoint_path*; writes vocab, merges, config, tokenizer config
    and weights into *pytorch_dump_folder_path*. Raises ValueError when an
    expected input file is missing. Restored from the scrambled version,
    whose duplicate parameter names were a SyntaxError and whose locals
    (``chkpt``, ``args``, ``src_dict``, ...) were all undefined.
    """
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=2))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=2))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=2))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    # rename fairseq "decoder.*" keys into the HF "biogpt.*" namespace
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    # CLI entry point: parse the two required paths and run the conversion.
    # The scrambled version assigned the parser to `_snake_case` but then
    # used `parser`/`args`, which were undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 81 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# NOTE(review): module-level flag, unused in this chunk — presumably a debug/skip
# switch for the tests below; confirm before removing.
__magic_name__ : Optional[int] = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''simple docstring'''
    # Intentionally empty: no fast (non-slow) tests are defined for this pipeline.
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for the VersatileDiffusion image-variation pipeline.

    Restores the undefined scrambled names: the device target, the loaded
    conditioning image, the seeded generator and the produced image tensor.
    """

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        # Reference values recorded from a known-good run; loose 1e-2 tolerance.
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 672 | 0 |
"""simple docstring"""
# Backward-compatibility shim: `memory_utils` moved to `utils.memory`.
import warnings


warnings.warn(
    # Fixed: the message previously said `find_executable_batchsize` (missing
    # underscore), which is not the real symbol name it then tells users to import.
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
| 82 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    """Shared assertions: a text-read Dataset has 4 rows, one "text" column, and the expected dtypes.

    Restored from the scrambled version whose duplicate parameter names were
    a SyntaxError; the callers below reference ``_check_text_dataset``.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """Reading with keep_in_memory toggled should (not) grow Arrow memory accordingly."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    """Explicitly requested features should override the default string dtype."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    """The returned dataset carries the requested split (defaulting to "train")."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # Fixed precedence bug: the original parsed as
    # `(dataset.split == split) if split else "train"`, which is vacuously
    # truthy when split is None and so never checked the default.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    """Both a single path and a list of paths are accepted input types."""
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict: each listed split looks like a 4-row text dataset."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    """DatasetDict variant of the keep_in_memory memory-behavior check."""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    """DatasetDict variant of the explicit-features override check."""
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    """A dict of paths yields a DatasetDict whose splits match the mapping keys."""
    if split:
        path = {split: text_path}
    else:
        # No split requested: read both default splits.
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 672 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def snake_case_ ( A_ : float, A_ : int ):
'''simple docstring'''
_lowerCamelCase : int = u
for i in range(1, A_ ):
_lowerCamelCase : int = temp * (u - i)
return temp
def main() -> None:
    """Read sample points interactively and print the Newton forward interpolation at a target value.

    Restored from the scrambled version, in which every local (``n``, ``x``,
    ``y``, ``u``, ``summ``) was assigned to a throwaway name and then
    referenced undefined.
    """
    n = int(input("enter the numbers of values: "))
    # n x n forward-difference table, first column holds the sample values.
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    # Normalised offset from the first sample point (assumes equally spaced x).
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    # Newton forward series: y0 + u*Δy0 + u(u-1)/2!*Δ²y0 + ...
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
| 83 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module wiring for the Swin V2 package. The scrambled version assigned
# the structure dict to `__magic_name__` while `_LazyModule` below consumed an
# undefined `_import_structure`, and the TYPE_CHECKING imports used misspelled
# "swinva"/"Swinva" module and symbol names.
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """Build the argument parser for `accelerate env`.

    When *subparsers* is given (invoked as a subcommand), registers an "env"
    subparser and wires its default handler; otherwise builds a standalone
    parser. The scrambled version set ``func`` to the subparsers object
    instead of the handler.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """Collect environment info (versions, accelerators, RAM, accelerate config), print it, and return it.

    Restored locals from the scrambled version (every value was bound to
    ``lowercase`` and lost); the config-file existence check now uses
    ``default_config_file`` rather than the args namespace itself.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main():
    """Standalone entry point: parse args and run the env report; always exits 0 on success."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    # The guard previously called an undefined `main` (all three functions
    # shared the scrambled name `UpperCAmelCase_`).
    raise SystemExit(main())
| 84 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a — the function whose positive root is sqrt(a).

    Renamed from the colliding scrambled ``snake_case_``; the Newton iteration
    below calls ``fx``.
    """
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    """Derivative of f(x) = x^2 - a, i.e. 2x. Renamed so the Newton step can call it."""
    return 2 * x
def get_initial_point(a: float) -> float:
    """Return a starting guess > sqrt(a) by repeatedly squaring 2.0 until it exceeds *a*.

    The scrambled body squared into a throwaway name and never advanced
    ``start``, looping forever for a >= 2; the update is restored.
    """
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with Newton's method on f(x) = x^2 - a.

    Raises ValueError for negative *a*. Stops early once successive iterates
    differ by less than *tolerance*, otherwise after *max_iter* steps.
    Restored from the scrambled version whose duplicate parameter names were
    a SyntaxError and whose loop never updated ``value``.
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        # Newton step: x_{k+1} = x_k - f(x_k)/f'(x_k)
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed as a script.
    from doctest import testmod
    testmod()
| 672 | 0 |
def _a ( lowercase__ : Tuple , lowercase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = [1]
for i in range(2 , lowercase__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : str = list(range(lowercase__ ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE__ : Optional[int] = factorials.pop()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = divmod(lowercase__ , lowercase__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — the config class below calls `logger.warning`, but the
# scrambled version bound the logger to `__magic_name__`.
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model.

    Renamed from the scrambled placeholder — ``GitConfig`` below instantiates
    ``GitVisionConfig``. The ``__init__`` parameter names are restored (the
    scrambled duplicates were a SyntaxError) and the ``model_type`` class
    attribute name is restored so ``PretrainedConfig`` dispatch works.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this vision config, unwrapping the nested "vision_config" when given a full GIT config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
    """Configuration for the GIT (GenerativeImage2Text) model: a vision tower
    plus a text decoder.

    NOTE(review): this class shares its name with the vision-config class
    above (the second binding shadows the first), and ``GitVisionConfig`` —
    referenced in ``__init__`` — is not defined anywhere in this module, so
    construction raises NameError.  ``__init__`` also repeats the parameter
    name ``lowerCamelCase`` (a SyntaxError), and the ``_snake_case = ...``
    lines look like lost ``self.<attr> = ...`` assignments.  Restore the
    original names before use.
    """

    UpperCAmelCase__ : int = '''git'''

    def __init__( self , lowerCamelCase=None , lowerCamelCase=30_522 , lowerCamelCase=768 , lowerCamelCase=6 , lowerCamelCase=12 , lowerCamelCase=3_072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=1_024 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=0 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=101 , lowerCamelCase=102 , lowerCamelCase=None , **lowerCamelCase , ):
        """Store text-decoder hyper-parameters and build the nested vision config."""
        super().__init__(bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , pad_token_id=lowerCamelCase , **lowerCamelCase )
        # Fall back to a default vision config when none is supplied.
        if vision_config is None:
            _snake_case = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        _snake_case = GitVisionConfig(**lowerCamelCase )
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = hidden_act
        _snake_case = intermediate_size
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = initializer_range
        _snake_case = layer_norm_eps
        _snake_case = position_embedding_type
        _snake_case = use_cache
        _snake_case = tie_word_embeddings
        _snake_case = num_image_with_embedding
        _snake_case = bos_token_id
        _snake_case = eos_token_id

    def UpperCamelCase( self ):
        """Serialize this config (and the nested vision config) to a plain dict.

        NOTE(review): the targets of the assignments below were lost; the
        intended form was ``output = copy.deepcopy(self.__dict__)`` followed by
        ``output["vision_config"] = ...`` and ``output["model_type"] = ...`` —
        as written, ``output`` is unbound at the return.
        """
        _snake_case = copy.deepcopy(self.__dict__ )
        _snake_case = self.vision_config.to_dict()
        _snake_case = self.__class__.model_type
        return output
| 672 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a :Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _a ( snake_case_ , unittest.TestCase ):
    """Tokenization tests for XGLM (slow SentencePiece tokenizer and its fast
    Rust counterpart), driven by the shared TokenizerTesterMixin fixtures.

    NOTE(review): the repeated ``A_ = ...`` statements are destroyed
    assignments — each originally bound a distinct local (``tokenizer``,
    ``input_text``, ``ids`` ...), and ``UpperCAmelCase`` is an unbound
    placeholder where concrete arguments (e.g. the ``SAMPLE_VOCAB`` fixture
    path, ``add_special_tokens=True``) used to be.  The four shadowing
    ``_lowerCamelCase`` class attributes were presumably ``tokenizer_class``,
    ``rust_tokenizer_class``, ``test_rust_tokenizer`` and
    ``test_sentencepiece``.  Restore those names before these tests can run.
    """

    _lowerCamelCase : int = XGLMTokenizer
    _lowerCamelCase : Union[str, Any] = XGLMTokenizerFast
    _lowerCamelCase : Dict = True
    _lowerCamelCase : Optional[Any] = True

    def __A ( self : Tuple ):
        """Set up a tokenizer from the SentencePiece fixture and persist it."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        A_ = XGLMTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def __A ( self : Dict ):
        """Check token<->id conversion for the pad token."""
        A_ = "<pad>"
        A_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )

    def __A ( self : Optional[int] ):
        """Check the first vocab entries and the total vocab size."""
        A_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(len(UpperCAmelCase ) , 1008 )

    def __A ( self : int ):
        """Check the reported vocab size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )

    def __A ( self : Dict ):
        """Full tokenize / ids / back-to-tokens round trip on sample text."""
        A_ = XGLMTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
        A_ = tokenizer.tokenize("This is a test" )
        self.assertListEqual(UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        A_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        A_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
        self.assertListEqual(
            UpperCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
        self.assertListEqual(
            UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    @cached_property
    def __A ( self : List[Any] ):
        """Load the real pretrained tokenizer used by the slow tests."""
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )

    def __A ( self : Union[str, Any] ):
        """Ensure the tokenizer survives a pickle round trip."""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(UpperCAmelCase , f.name )
            A_ = XGLMTokenizer(f.name , keep_accents=UpperCAmelCase )
            A_ = pickle.dumps(UpperCAmelCase )
        pickle.loads(UpperCAmelCase )

    def __A ( self : str ):
        """Slow and fast tokenizers must agree on tokens and encodings."""
        if not self.test_rust_tokenizer:
            return
        A_ = self.get_tokenizer()
        A_ = self.get_rust_tokenizer()
        A_ = "I was born in 92000, and this is falsé."
        A_ = tokenizer.tokenize(UpperCAmelCase )
        A_ = rust_tokenizer.tokenize(UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
        A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        A_ = self.get_rust_tokenizer()
        A_ = tokenizer.encode(UpperCAmelCase )
        A_ = rust_tokenizer.encode(UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )

    @slow
    def __A ( self : Optional[int] ):
        """Pinned encoding of a short string with the pretrained tokenizer."""
        A_ = "Hello World!"
        A_ = [2, 31227, 4447, 35]
        self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )

    @slow
    def __A ( self : Any ):
        """Pinned encoding of a long string with unusual characters and unks."""
        A_ = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        A_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )

    @slow
    def __A ( self : Any ):
        """Pinned full-batch integration encoding (input_ids + attention_mask)."""
        # fmt: off
        A_ = {
            "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase , model_name="facebook/xglm-564M" , padding=UpperCAmelCase , )
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
def shape_list(tensor):
    """Return the shape of a tensor as a Python list, preferring static dims.

    Renamed to ``shape_list`` because code below in this module calls that
    name.  NumPy arrays return their static shape directly; for TF tensors,
    each statically-unknown dimension (``None``) is replaced by the
    corresponding dynamic ``tf.shape`` entry.

    :param tensor: a ``np.ndarray`` or ``tf.Tensor``
    :return: list of ints (and scalar tensors for dynamic dimensions)
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    # Fully-unknown rank: only the dynamic shape tensor can describe it.
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def snake_case_(logits, axis=None, name=None):
    """Numerically-safe wrapper around ``tf.nn.softmax``.

    Adds a tiny constant (1e-9) to the logits as a workaround for softmax
    instabilities on some backends; the perturbation is far below float
    precision of typical logits and does not change results meaningfully.

    :param logits: input tensor
    :param axis: softmax axis (defaults to the last axis)
    :param name: optional op name
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def snake_case_(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Functional layer normalization over a single axis (TF).

    :param inputs: tensor to normalize
    :param weight: 1-D scale tensor for the normalized axis
    :param bias: 1-D offset tensor for the normalized axis
    :param epsilon: variance epsilon
    :param axis: single int axis to normalize over
    :raises NotImplementedError: for non-1D weight/bias or non-int axis
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1
        # on every dimension except `axis`, so they broadcast correctly.
        shape = [1] * inputs.shape.rank
        static_dim = inputs.shape.as_list()[axis]
        # Fall back to the dynamic dimension when the static one is unknown;
        # tf.reshape accepts a mixed int/scalar-tensor shape list.
        shape[axis] = static_dim if static_dim is not None else tf.shape(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def snake_case_(input, start_dim=0, end_dim=-1):
    """Flatten the dimensions ``start_dim .. end_dim`` (inclusive) of a tensor,
    replicating the semantics of ``torch.flatten`` in TensorFlow.

    :param input: tensor to flatten
    :param start_dim: first dimension to merge (may be negative)
    :param end_dim: last dimension to merge (may be negative)
    """
    # Normalize negative dims against the tensor's rank.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    # Nothing to merge.
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def snake_case_(encoder_attention_mask):
    """Expand a 2-D/3-D encoder attention mask to 4-D and invert it so that
    attended positions become 0 and masked positions become ``dtype.min``.

    :param encoder_attention_mask: mask of shape (batch, seq) or (batch, from, to)
    :return: additive mask of shape (batch, 1, [1,] seq)
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def snake_case_(tensor, embed_dim, tensor_name="input_ids"):
    """Assert (via tf.debugging) that every id in ``tensor`` is a valid index
    into an embedding layer with ``embed_dim`` rows.

    :param tensor: integer id tensor
    :param embed_dim: size of the embedding layer's input dimension
    :param tensor_name: name used in the error message
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def snake_case_(group, name, data):
    """Save the attribute ``name`` = ``data`` on an HDF5 ``group``, splitting it
    into numbered chunks (``name0``, ``name1``, ...) when it would exceed the
    HDF5 object-header limit.

    :param group: HDF5 group (anything exposing a dict-like ``.attrs``)
    :param name: attribute name
    :param data: list of attribute values to store
    :raises RuntimeError: when a single item is itself too large to store
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def snake_case_(group, name):
    """Load the attribute ``name`` from an HDF5 ``group``, reassembling values
    that were stored in numbered chunks (``name0``, ``name1``, ...).

    Bytes entries are decoded to str; everything else is returned as-is.

    :param group: HDF5 group (anything exposing a dict-like ``.attrs``)
    :param name: attribute name to load
    :return: list of attribute values (possibly empty)
    """
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def snake_case_(data):
    """Expand every rank-1 ``tf.Tensor`` in a nested structure to rank 2 by
    appending a trailing axis; all other leaves pass through untouched.

    :param data: arbitrarily nested structure of tensors/values
    """
    def _expand_single_1d_tensor(t):
        # Only tf.Tensor leaves of rank exactly 1 are expanded.
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 672 | 0 |
from __future__ import annotations
# Sieve of Eratosthenes over [0, 1_000_000]: after this loop, seive[n] is True
# exactly when n is prime (indices 0 and 1 are never cleared, but the helpers
# below only query n >= 2).  The functions in this module read the names
# `seive` and `i`, so those bindings must exist.
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        # i is prime: mark every multiple from i*i upward as composite.
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1
def is_prime(lowercase_) -> bool:
    """Return True when ``lowercase_`` is prime, via the precomputed
    module-level sieve (valid for 0 <= lowercase_ <= 1_000_000).

    Renamed to ``is_prime`` because the circular-primes function below calls
    that name.
    """
    return seive[lowercase_]
def contains_an_even_digit(lowercase_) -> bool:
    """Return True when the decimal representation of ``lowercase_`` contains
    at least one even digit (0, 2, 4, 6 or 8).

    Renamed to ``contains_an_even_digit`` because the circular-primes function
    below calls that name.
    """
    return any(digit in "02468" for digit in str(lowercase_))
def find_circular_primes(lowercase_=1_000_000) -> list[int]:
    """Return every circular prime up to ``lowercase_`` (Project Euler 35).

    A circular prime stays prime under every rotation of its digits.  Relies
    on the module-level ``seive`` lookup table, which must cover all rotations
    (rotations keep the digit count, so any limit <= 1_000_000 is safe).
    """

    def _is_prime(n: int) -> bool:
        # Sieve lookup; local helper so this function does not depend on the
        # (shadowed) sibling helper names.
        return seive[n]

    result = [2]  # 2 is the only circular prime with an even digit.
    for num in range(3, lowercase_ + 1, 2):
        # Any multi-digit number containing an even digit has a rotation
        # ending in that digit, hence divisible by 2 — skip it early.
        if _is_prime(num) and not any(digit in "02468" for digit in str(num)):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            # Every rotation (not just the number itself) must be prime.
            if all(_is_prime(rotation) for rotation in list_nums):
                result.append(num)
    return result
def SCREAMING_SNAKE_CASE ( ) -> int:
    """Count the circular primes below one million.

    NOTE(review): ``find_circular_primes`` is never bound in this module as
    written — all the helpers above share the name ``SCREAMING_SNAKE_CASE`` —
    so this call raises NameError until the helper names are restored.
    """
    return len(find_circular_primes() )
# Script entry point: report how many circular primes exist below one million.
# NOTE(review): `find_circular_primes` is never bound in this module as
# written (the helpers above were all renamed), so running this file raises
# NameError until the helper names are restored.
if __name__ == "__main__":
    print(F'''{len(find_circular_primes()) = }''')
| 87 |
'''simple docstring'''
__magic_name__ : int = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation):
    """Evaluate a fully parenthesized infix arithmetic expression with
    Dijkstra's two-stack algorithm.

    Renamed to ``dijkstras_two_stack_algorithm`` because the ``__main__``
    guard below calls that name.

    :param equation: iterable of single-character tokens, e.g.
        ``"(5 + ((4 * 2) * (2 + 3)))"`` — only single-digit operands are
        supported, since tokens are consumed one character at a time.
    :return: the numeric value of the expression
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for token in equation:
        if token.isdigit():
            # RULE 1: push operands onto the operand stack.
            operand_stack.push(int(token))
        elif token in operators:
            # RULE 2: push operators onto the operator stack.
            operator_stack.push(token)
        elif token == ")":
            # RULE 4: on ')', pop one operator and two operands, apply the
            # operator (second-popped op first-popped), and push the result.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
        # RULE 3: '(' and whitespace are ignored.
    # RULE 5: the single remaining operand is the value of the expression.
    return operand_stack.peek()
# Demo entry point: evaluate a sample expression.  The name `equation` must be
# bound for the f-string below to work (the obfuscated `__magic_name__`
# binding left it unbound).
if __name__ == "__main__":
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 672 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__ ( A_ ):
    """Processor combining an auto image processor and an auto tokenizer into a
    single vision+text preprocessing object.

    The class attributes below follow the ProcessorMixin contract
    (``attributes`` / ``*_class``); the previous shadowed ``__UpperCAmelCase``
    bindings left those names unbound, which broke instantiation.
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # Kept for backward compatibility with code that used `current_processor`.
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns the tokenizer encoding (with ``pixel_values`` merged in when
        images are also given), or a BatchEncoding of image features when only
        images are given.

        :raises ValueError: when neither text nor images is provided
        """
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Names of the model inputs this processor produces."""
        return ["input_ids", "attention_mask", "pixel_values"]
| 88 |
'''simple docstring'''
from torch import nn
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 672 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
# Vocabulary / emoji file layout and download map for GPT-NeoX-Japanese.  The
# tokenizer class below reads these exact names (VOCAB_FILES_NAMES, ...); the
# previous shadowed `SCREAMING_SNAKE_CASE` bindings left them unbound.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a GPT-NeoX-Japanese vocabulary file and emoji file.

    Renamed to ``load_vocab_and_emoji`` because the tokenizer class below
    calls that name.  Each vocab line is either a single token or a
    comma-separated group of surface forms that share one id.

    :param vocab_file: path to the vocab text file (one entry per line)
    :param emoji_file: path to the emoji JSON file
    :return: (vocab, raw_vocab, ids_to_tokens, emoji) where ``vocab`` maps
        every surface form to its id, ``raw_vocab`` maps only each line's
        first form, and ``ids_to_tokens`` maps id -> list of forms
    """
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    # A line without a comma is a single token; otherwise it is a
    # comma-separated list of surface forms mapped to the same id.
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class _lowerCamelCase( _a ):
    """GPT-NeoX-Japanese tokenizer (special sub-word tokenizer + emoji table).

    NOTE(review): this class cannot currently be imported or used —
    ``__init__`` repeats the parameter name ``lowerCamelCase`` (duplicate
    argument names are a SyntaxError), the ``_lowercase = ...`` statements are
    destroyed ``self.<attr> = ...`` assignments, and the body reads names that
    are never bound (``vocab_file``, ``emoji_file``, ``index``,
    ``SubWordJapaneseTokenizer`` — the class below was renamed).  The four
    shadowing ``lowercase_`` class attributes were presumably
    ``vocab_files_names``, ``pretrained_vocab_files_map``,
    ``max_model_input_sizes`` and ``model_input_names``.
    """

    lowercase_ : str = VOCAB_FILES_NAMES
    lowercase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : Union[str, Any] = ["""input_ids""", """attention_mask"""]

    def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase="<|endoftext|>", lowerCamelCase="<|endoftext|>", lowerCamelCase="<|startoftext|>", lowerCamelCase="<|endoftext|>", lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
        """Load the vocab/emoji files and build the sub-word tokenizer."""
        super().__init__(
            unk_token=lowerCamelCase, pad_token=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, do_clean_text=lowerCamelCase, **lowerCamelCase, )
        if not os.path.isfile(lowerCamelCase):
            raise ValueError(
                F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
                ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        if not os.path.isfile(lowerCamelCase):
            raise ValueError(
                F'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
                ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
        _lowercase : Tuple = do_clean_text
        _lowercase , _lowercase , _lowercase , _lowercase : Dict = load_vocab_and_emoji(lowerCamelCase, lowerCamelCase)
        _lowercase : str = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)

    @property
    def UpperCamelCase ( self) -> Optional[int]:
        """Vocabulary size (number of distinct ids)."""
        return len(self.raw_vocab)

    def UpperCamelCase ( self) -> List[str]:
        """Return the full vocab including added tokens."""
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
        """Tokenize text with the sub-word tokenizer (optionally cleaning it first)."""
        return self.subword_tokenizer.tokenize(lowerCamelCase, clean=self.do_clean_text)

    def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
        """Convert a token string to its id (unk id when unknown)."""
        return self.vocab.get(lowerCamelCase, self.vocab.get(self.unk_token))

    def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
        """Convert an id back to its token string."""
        return self.subword_tokenizer.convert_id_to_token(lowerCamelCase)

    def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
        """Join a token sequence back into a single string."""
        _lowercase : str = ''.join(lowerCamelCase).strip()
        return out_string

    def UpperCamelCase ( self, lowerCamelCase) -> List[int]:
        """Build conversation input ids, truncated to the model max length."""
        _lowercase : str = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowerCamelCase, add_special_tokens=lowerCamelCase) + [self.eos_token_id])
        if len(lowerCamelCase) > self.model_max_length:
            _lowercase : List[str] = input_ids[-self.model_max_length :]
        return input_ids

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]:
        """Write the vocab and emoji files into a save directory.

        NOTE(review): ``index`` (read in the loop below) was presumably the
        counter initialized in the destroyed ``_lowercase : Tuple = 0``
        assignment above it — verify when restoring names.
        """
        _lowercase : Tuple = 0
        if os.path.isdir(lowerCamelCase):
            _lowercase : int = os.path.join(
                lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
            _lowercase : Union[str, Any] = os.path.join(
                lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
        else:
            _lowercase : Dict = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            _lowercase : str = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(lowerCamelCase, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ' Please check that the vocabulary is not corrupted!')
                    _lowercase : Union[str, Any] = token_index
                writer.write(','.join(lowerCamelCase) + '\n')
                index += 1
        with open(lowerCamelCase, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, lowerCamelCase)
        return vocab_file, emoji_file
class _lowerCamelCase( _a ):
    """Sub-word tokenizer for GPT-NeoX-Japanese: longest-match segmentation
    over a multi-surface-form vocab, plus URL/email/date/price normalization
    and emoji handling.

    NOTE(review): this class was presumably named ``SubWordJapaneseTokenizer``
    (the tokenizer class above instantiates that name).  ``__init__`` repeats
    the parameter name ``lowerCamelCase`` (a SyntaxError) and the
    ``_lowercase = ...`` statements are destroyed ``self.<attr> = ...``
    assignments (``self.vocab``, ``self.ids_to_tokens``, ``self.emoji``,
    ``self.maxlen``, the compiled regexes, ``self.content_transa`` ...).
    Restore those names before use.
    """

    def __init__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
        """Store the vocab tables and precompile the normalization patterns."""
        _lowercase : List[Any] = vocab # same as swe
        _lowercase : int = ids_to_tokens # same as bpe
        _lowercase : Tuple = emoji
        _lowercase : Tuple = np.max([len(lowerCamelCase) for w in self.vocab.keys()])
        # Normalization regexes: URLs, emails, phone numbers, dates (two
        # formats), era dates, and prices.
        _lowercase : Dict = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        _lowercase : Tuple = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        _lowercase : List[Any] = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        _lowercase : Union[str, Any] = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        _lowercase : List[Any] = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
        _lowercase : Optional[Any] = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
        # Box-drawing and block characters are collapsed into <BLOCK>.
        _lowercase : List[str] = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        _lowercase : int = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        _lowercase : str = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})

    def __len__( self) -> List[str]:
        """Number of ids in the tokenizer."""
        return len(self.ids_to_tokens)

    def UpperCamelCase ( self, lowerCamelCase) -> Tuple:
        """Replace URLs/emails/phones/dates/prices with placeholder tags and
        collapse box-drawing characters into a single <BLOCK>."""
        _lowercase : Union[str, Any] = self.content_repattera.sub('<URL>', lowerCamelCase)
        _lowercase : Dict = self.content_repattera.sub('<EMAIL>', lowerCamelCase)
        _lowercase : List[str] = self.content_repattera.sub('<TEL>', lowerCamelCase)
        _lowercase : Optional[int] = self.content_repattera.sub('<DATE>', lowerCamelCase)
        _lowercase : Tuple = self.content_repattera.sub('<DATE>', lowerCamelCase)
        _lowercase : str = self.content_repattera.sub('<PRICE>', lowerCamelCase)
        _lowercase : int = content.translate(self.content_transa)
        while "<BLOCK><BLOCK>" in content:
            _lowercase : Optional[Any] = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=False) -> List[Any]:
        """Tokenize text by greedy longest-match over the vocab, after
        normalizing whitespace/newlines/emoji (and optionally cleaning)."""
        _lowercase : int = text.replace(' ', '<SP>')
        _lowercase : Optional[Any] = text.replace(' ', '<SP>')
        _lowercase : Tuple = text.replace('\r\n', '<BR>')
        _lowercase : Tuple = text.replace('\n', '<BR>')
        _lowercase : Dict = text.replace('\r', '<BR>')
        _lowercase : str = text.replace('\t', '<TAB>')
        _lowercase : Optional[Any] = text.replace('—', 'ー')
        _lowercase : Union[str, Any] = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                _lowercase : Optional[int] = text.replace(lowerCamelCase, lowerCamelCase)
        if clean:
            _lowercase : Tuple = self.clean_text(lowerCamelCase)
        def check_simbol(lowerCamelCase):
            # NOTE(review): both len() calls read the same name — upstream
            # compares len(x) == 1 (one char) and len(e) == 2 (two UTF-8 bytes).
            _lowercase : Optional[int] = x.encode()
            if len(lowerCamelCase) == 1 and len(lowerCamelCase) == 2:
                _lowercase : List[Any] = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False
        def checkuae(lowerCamelCase):
            # NOTE(review): same destroyed bindings — upstream checks a single
            # char whose UTF-8 encoding is 3 bytes, in the U+2000..U+2BFF range.
            _lowercase : int = x.encode()
            if len(lowerCamelCase) == 1 and len(lowerCamelCase) == 3:
                _lowercase : Optional[Any] = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False
        _lowercase : Dict = 0
        _lowercase : Optional[int] = []
        while pos < len(lowerCamelCase):
            # Try the longest candidate first; '<' starts a special tag so the
            # window is extended to maxlen, otherwise 3 chars.
            _lowercase : Optional[Any] = min(len(lowerCamelCase), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            _lowercase : Optional[int] = [] # (token_id, token, pos)
            for e in range(lowerCamelCase, lowerCamelCase, -1):
                _lowercase : Optional[Any] = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(lowerCamelCase) > 2:
                        _lowercase : List[Any] = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(lowerCamelCase) > 0:
                # the smallest token_id is adopted
                _lowercase , _lowercase , _lowercase : List[str] = sorted(lowerCamelCase, key=lambda lowerCamelCase: x[0])[0]
                result.append(lowerCamelCase)
                _lowercase : List[str] = e
            else:
                # No vocab match: fall back to symbol classes or raw bytes.
                _lowercase : Optional[int] = pos + 1
                _lowercase : int = text[pos:end]
                if check_simbol(lowerCamelCase):
                    result.append('<KIGOU>')
                elif checkuae(lowerCamelCase):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                _lowercase : Optional[Any] = end
        return result

    def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase="\n") -> List[str]:
        """Convert an id back to text, reassembling <|byteN|> runs into UTF-8
        and expanding the special whitespace/emoji tags."""
        _lowercase : List[Any] = []
        _lowercase : str = []
        _lowercase : str = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(lowerCamelCase) > 0:
                words.append(bytearray(lowerCamelCase).decode('utf-8', errors='replace'))
                _lowercase : Optional[Any] = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word])
            elif word == "<SP>":
                words.append(' ')
            elif word == "<BR>":
                words.append(lowerCamelCase)
            elif word == "<TAB>":
                words.append('\t')
            elif word == "<BLOCK>":
                words.append('▀')
            elif word == "<KIGOU>":
                words.append('ǀ')
            elif word == "<U2000U2BFF>":
                words.append('‖')
            else:
                words.append(lowerCamelCase)
        if len(lowerCamelCase) > 0:
            words.append(bytearray(lowerCamelCase).decode('utf-8', errors='replace'))
        _lowercase : Union[str, Any] = ''.join(lowerCamelCase)
        return text
| 89 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# The A* classes below read these exact names (HEURISTIC, grid, delta); the
# previous shadowed `__magic_name__` bindings left them all unbound.
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Positions are (row, column) pairs.
TPosition = tuple[int, int]
class __SCREAMING_SNAKE_CASE :
    """A search node for A*: a grid position, the goal, the path cost so far
    (g), a heuristic estimate to the goal (h), and their sum (f).

    Nodes order by ``f_cost`` so that a sorted open list pops the most
    promising node first.  The previous version assigned every attribute to a
    throwaway local (``_snake_case = pos_x``) and repeated parameter names, so
    it could neither compile nor hold state.
    """

    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # Positions are stored as (row, column) == (y, x).
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic( self ):
        """Distance-to-goal estimate: Manhattan when the module-level
        HEURISTIC flag is 1, Euclidean otherwise."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__( self , other ):
        return self.f_cost < other.f_cost
class __SCREAMING_SNAKE_CASE :
    """One-directional A* search over the module-level ``grid``.

    NOTE(review): this class cannot currently run — ``__init__`` repeats the
    parameter name ``lowerCamelCase`` (a SyntaxError), the ``_snake_case``
    statements are destroyed ``self.<attr> = ...`` assignments
    (``self.start``, ``self.target``, ``self.open_nodes``,
    ``self.closed_nodes``, ``self.reached``), and it references ``Node``,
    ``grid`` and ``delta``, which are never bound under those names in this
    module as written.  Restore the names before use.
    """

    def __init__( self , lowerCamelCase , lowerCamelCase ):
        """Build start/target nodes from (row, col) tuples and seed the open list."""
        _snake_case = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase )
        _snake_case = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowerCamelCase )
        _snake_case = [self.start]
        _snake_case = []
        _snake_case = False

    def UpperCamelCase( self ):
        """Run the search; return the start->target path, or [start] on failure."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            _snake_case = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(lowerCamelCase )
            self.closed_nodes.append(lowerCamelCase )
            _snake_case = self.get_successors(lowerCamelCase )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(lowerCamelCase )
                else:
                    # retrieve the best current path
                    _snake_case = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(lowerCamelCase )
                    else:
                        self.open_nodes.append(lowerCamelCase )
        return [self.start.pos]

    def UpperCamelCase( self , lowerCamelCase ):
        """Return in-bounds, non-obstacle neighbor nodes of the given parent."""
        _snake_case = []
        for action in delta:
            _snake_case = parent.pos_x + action[1]
            _snake_case = parent.pos_y + action[0]
            # Skip off-grid positions and obstacle cells (grid value != 0).
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    lowerCamelCase , lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCamelCase , ) )
        return successors

    def UpperCamelCase( self , lowerCamelCase ):
        """Walk parent pointers back from a node to build the start->node path."""
        _snake_case = node
        _snake_case = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            _snake_case = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar instances expanded in lockstep.

    Each step the target of each direction is set to the node the opposite
    direction just expanded, so the two frontiers chase each other.
    """

    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Run the bidirectional search and return the joined (y, x) path."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            # The frontiers met: stitch the two half-paths together.
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each direction now aims at the opposite frontier's newest node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # Retrieve the best current path to this position.
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join forward and backward half-paths (dropping the shared meeting node once)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the original timed only construction; the search itself
    # must run for the timing to mean anything.
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names the tokenizer reads/writes; referenced by the tokenizer class
# and by save_vocabulary.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

# End-of-word marker appended by BPE, and the continuation marker used in
# the emitted token strings.
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (e.g. a tuple of variable-length
    strings); the result feeds the BPE merge loop.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length; this is the conventional placeholder.
# Referenced by the tokenizer class as `max_model_input_sizes`.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class a__(PreTrainedTokenizer):
    """Speech2Text2 tokenizer: BPE applied to whitespace-split words.

    When no ``merges_file`` is provided the tokenizer can only decode
    (``_tokenize`` raises).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            # Keep only the two merged symbols per line; rank == line order.
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single word, returning a space-joined string."""
        # Append the end-of-word marker to the final symbol.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        # Non-final sub-tokens are marked with the continuation marker "@@ ".
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` on whitespace and BPE-encode each word."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token) -> int:
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join tokens into a single string, collapsing the "@@ " markers."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json (and merges.txt, when BPE ranks exist) to ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding: the name `_import_structure` is required by the
# _LazyModule call below.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

# Modeling objects are only importable when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace the module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort ``my_list`` with bucket sort (one bucket per integer offset).

    Returns a new sorted list; an empty input yields an empty list.
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    # Each bucket is sorted individually, then concatenated in order.
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Smoke tests for the sorter defined above.
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
'''simple docstring'''
import string
def atbash_slow(sequence: str) -> str:
    """Encode ``sequence`` with the Atbash cipher via per-character ord() math.

    Letters are mirrored within their case (A<->Z, a<->z); everything else
    passes through unchanged.
    """
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            # Uppercase: 'A'(65) <-> 'Z'(90), mirrored around 155/2.
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            # Lowercase: 'a'(97) <-> 'z'(122), mirrored around 219/2.
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    """Encode ``sequence`` with the Atbash cipher using a lookup table.

    Same result as ``atbash_slow`` but built from string translations.
    """
    letters = string.ascii_letters
    # Reversed alphabets aligned index-for-index with `letters`.
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Time both Atbash implementations against string.printable."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle, centred, for ``num_rows`` rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle row by row.

    Raises TypeError for non-int input and ValueError for negative input;
    0 rows yields an empty list.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Return row ``current_row_idx`` given the rows already in ``triangle``."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Fill one interior cell: the sum of the two cells above it (in place)."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build Pascal's triangle computing only the first half of each row.

    The second half is the mirror of the first, so rows are assembled from
    pairwise sums over a zero-padded previous row plus a reversed prefix.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
def benchmark() -> None:
    """Time both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # One timeit run per (function, value) pair, driven through __main__.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU/GELU approximation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
# Standard gravity in m/s^2; used as the default `gravity` below.
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force on an object submerged in a fluid.

    force = fluid_density * gravity * volume.

    Raises:
        ValueError: if ``fluid_density`` or ``gravity`` is non-positive,
            or ``volume`` is negative.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")

    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Single-node SageMaker GLUE training smoke test.

    Parameterized over the pytorch and tensorflow example scripts; launches a
    real training job and checks runtime/accuracy/loss KPIs.
    """

    def setUp(self):
        # The pytorch variant needs the example script copied into the test path.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        # Export the job's metric time series for sharing.
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image to a [-1, 1] float tensor of shape (1, C, H, W).

    Width/height are rounded down to a multiple of 32 before resizing.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class UpperCAmelCase_(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline.

    Parameters:
        vqvae: VQ-VAE used to decode latents into images.
        unet: conditional U-Net that denoises latents concatenated with the
            low-resolution input.
        scheduler: any of the supported diffusion schedulers.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        """Run super-resolution on ``image`` and return the upscaled result."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __SCREAMING_SNAKE_CASE(BertTokenizationTest):
    """DistilBERT tokenization tests, reusing the BERT tokenizer test suite."""

    # Attribute names are the test-suite contract expected by BertTokenizationTest.
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # Single sequence: [CLS] text [SEP]; pair: [CLS] text [SEP] text_2 [SEP].
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class UpperCamelCase_(PretrainedConfig):
    """Configuration for an EfficientFormer model.

    Stores every architecture hyperparameter on the instance so downstream
    model code can read them; unknown kwargs are handled by PretrainedConfig.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding: the name `_import_structure` is required by the
# _LazyModule call below.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

# Modeling objects are only importable when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Replace the module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src) -> None:
    """Print one line per vertex with its shortest distance from ``src``."""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if any edge can still be relaxed (i.e. a negative cycle exists)."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Bellman-Ford shortest-path distances from ``src``.

    Edges are dicts with "src"/"dst"/"weight" keys. Raises Exception when a
    negative cycle is reachable.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax all edges |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the (old_key, new_key) pairs mapping original BEiT/DiT checkpoint names
    to HuggingFace ``Beit*`` model parameter names.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        has_lm_head: True for the self-supervised checkpoint (masked-image-modeling
            head + final layernorm), False for the RVL-CDIP classification head.
        is_semantic: True when the original keys carry a ``backbone.`` prefix.

    Returns:
        List of ``(source_key, target_key)`` tuples.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused ``attn.qkv`` projection into separate query/key/value tensors
    and move the layer-scale gammas, rewriting ``state_dict`` in place.

    Args:
        state_dict: checkpoint state dict; mutated in place.
        config: model config; ``num_hidden_layers`` and ``hidden_size`` are read.
        has_lm_head: unused here, kept for call-site compatibility.
        is_semantic: True when the original keys carry a ``backbone.`` prefix.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        # qkv weight is stacked [query; key; value] along dim 0 (k has no bias in BEiT).
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    """Rename key ``old`` to ``new`` in ``dct`` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to sanity-check the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read straight from the response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Download a DiT checkpoint, convert it to the HuggingFace BEiT format, verify the
    output shape on a test image, save it, and optionally push it to the hub.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: directory to write the converted model into.
        push_to_hub: if True, also upload the model and image processor.
    """
    # "rvlcdip" checkpoints are classification fine-tunes; all others keep the
    # masked-image-modeling head.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    # NOTE(review): these config flags were mangled in the source; values follow the
    # upstream DiT conversion script — confirm.
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 672 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase__:
    """Builds tiny ViTMAE configs/inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted for signature compatibility; not stored
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio),
        # rounded above (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): the is_decoder argument was mangled in the source; False is assumed.
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViTMAE (randomized masking makes several mixin tests
    non-deterministic, hence the skips)."""

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMAE does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''')
    def test_determinism(self):
        pass

    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results.''')
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image shipped with the test fixtures."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowercase__(unittest.TestCase):
    """Slow integration test: run pretrained ViTMAE on a real image and verify logits."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''') if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 97 |
'''simple docstring'''
import math
def is_prime(number):
    """Return True iff ``number`` is prime, by trial division over odd candidates
    up to sqrt(number).

    Raises:
        AssertionError: if ``number`` is not a non-negative int.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    """Return a prime at or beyond ``factor * value``.

    Scans upward by default, or downward when ``desc=True`` is passed; if a
    descending scan cycles back to its starting value, restart one above it.
    """
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1, **kwargs)
    return value
| 672 | 0 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __lowerCAmelCase ( unittest.TestCase ):
    """CPU-only smoke tests that launch the bundled accelerate test scripts via debug_launcher."""
    # NOTE(review): both test methods share the name `snake_case__`, so the second
    # definition shadows the first and only the ops check actually runs — the
    # method names look mangled; restore distinct test_* names.
    def snake_case__ ( self : str ) -> Optional[int]:
        '''Run the main accelerate test script through debug_launcher.'''
        debug_launcher(test_script.main )
    def snake_case__ ( self : Any ) -> Optional[Any]:
        '''Run the ops test script through debug_launcher.'''
        debug_launcher(test_ops.main )
| 98 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the pix2struct sub-package: maps submodule name to the
# public names it exports; optional backends gate the image-processing and modeling
# entries.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static imports mirror _import_structure so type checkers see the same names.
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 672 | 0 |
def pancake_sort(arr):
    """Sort ``arr`` with pancake sort and return the sorted list.

    Each round flips the maximum of the unsorted prefix to the front, then flips
    the whole prefix so that maximum lands in its final position.
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
| 99 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    r"""Image processor applying optional shortest-edge resize, center crop, rescale
    and per-channel normalization (ImageNet standard mean/std defaults)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # NOTE(review): default_to_square for the shortest-edge size was mangled in the
        # source; False matches the shortest-edge convention — confirm.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the shortest edge matches size["shortest_edge"], preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured pipeline on one image or a batch; returns a BatchFeature
        with key ``pixel_values``. Per-call arguments override the stored defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 672 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.