code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowercase__ = datasets.load_iris()
lowercase__ = np.array(data["""data"""])
lowercase__ = np.array(data["""target"""])
lowercase__ = data["target_names"]
lowercase__ = train_test_split(X, y)
def euclidean_distance(a, b) -> float:
    """Return the Euclidean (L2) distance between two points.

    :param a: first point, any sequence of numbers
    :param b: second point, same length as ``a``
    :return: the L2 norm of ``a - b``
    """
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify ``point`` by a majority vote among its k nearest neighbours.

    :param train_data: iterable of training feature vectors
    :param train_target: class index for each training vector
    :param classes: sequence mapping a class index to its name
    :param point: the feature vector to classify
    :param k: number of neighbours taking part in the vote (default 5)
    :return: name of the most common class among the k closest points
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 241
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
logger = logging.get_logger(__name__)

# Hub locations of the published Falcon configuration files.
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    """Configuration class for Falcon models.

    Stores the hyper-parameters of a Falcon architecture and forwards
    ``bos_token_id``/``eos_token_id`` to :class:`PretrainedConfig`.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Default to multi-head attention when no KV-head count is given.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Dimension of a single attention head."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Whether rotary embeddings are used (they are disabled when alibi is on)."""
        return not self.alibi
| 51
| 0
|
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of ``array``.

    :param array: list of comparable values
    :return: a longest subsequence whose elements are non-decreasing
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            # Candidate subsequence that skips the pivot and starts at array[i].
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    # Candidate subsequence that keeps the pivot as its first element.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

# 0 cells are free path whereas 1 cells are obstacles.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A board position given as (y, x).
TPosition = tuple[int, int]
class Node:
    """A search node on the grid, ordered by total cost f = g + h."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are stored as (y, x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic distance to the goal, Manhattan or Euclidean per HEURISTIC."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Sorting the open list by f-cost makes list.sort() pick the best node first.
        return self.f_cost < other.f_cost
class AStar:
    """A* search over the module-level ``grid`` using ``delta`` moves."""

    def __init__(self, start: TPosition, goal: TPosition):
        # Node takes (pos_x, pos_y, goal_x, goal_y, ...); positions come in as (y, x).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path start→target, or [start] when unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-grid, non-obstacle neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back from ``node`` and return the path start→node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Two A* searches running towards each other from start and goal."""

    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Advance both searches in lockstep until their frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each search aims for the other search's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
snake_case_ : Any = (0, 0)
snake_case_ : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case_ : str = time.time()
snake_case_ : List[str] = AStar(init, goal)
snake_case_ : Optional[int] = a_star.search()
snake_case_ : Optional[Any] = time.time() - start_time
print(f"AStar execution time = {end_time:f} seconds")
snake_case_ : int = time.time()
snake_case_ : Dict = BidirectionalAStar(init, goal)
snake_case_ : str = time.time() - bd_start_time
print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 51
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()  # make all torch ops deterministic so pixel-exact assertions below hold
class PipelineFastTests(unittest.TestCase):
    """Fast CPU tests for AudioDiffusionPipeline with tiny dummy models."""

    def tearDown(self):
        # Release any tensors held by the last test before the next one runs.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        """Tiny unconditional UNet used by the unconditional pipeline test."""
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        """Tiny conditional UNet (cross-attention dim 10) for the encoding test."""
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        """Tiny (VAE, UNet) pair for the latent audio-diffusion test."""
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNetaDModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        # Same seed through the tuple-returning path must give identical output.
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        # Latent pipeline: VAE + UNet with DDIM, starting from raw audio.
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        # Conditional pipeline: generation driven by an encoding tensor.
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    """GPU integration test against a pretrained audio-diffusion checkpoint."""

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 18
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds tiny MaskFormer configs and random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Random pixel values, masks and labels plus a matching tiny config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Tiny Swin backbone + DETR decoder configuration."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue(len(x), msg) only checks that len(x) is truthy —
        # kept as in the original to avoid changing what the test verifies.
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite and MaskFormer-specific model tests."""

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
# Absolute tolerance used when comparing model output tensors in the tests below.
TOLERANCE = 1e-4


def prepare_img():
    """Return the standard COCO test image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
if is_vision_available()
else None
)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(_snake_case)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(_snake_case , (1, 3, 800, 1088))
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
UpperCAmelCase_ = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]).to(_snake_case)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
UpperCAmelCase_ = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]).to(_snake_case)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
UpperCAmelCase_ = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]]).to(_snake_case)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _snake_case , atol=_snake_case))
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
.to(_snake_case)
.eval()
)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(_snake_case , (1, 3, 800, 1088))
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase_ = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
UpperCAmelCase_ = torch.tensor(
[
[1.65_12e00, -5.25_72e00, -3.35_19e00],
[3.61_69e-02, -5.90_25e00, -2.93_13e00],
[1.07_66e-04, -7.76_30e00, -5.12_63e00],
]).to(_snake_case)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))
def lowerCamelCase ( self : Optional[Any]):
    """Integration test: MaskFormer (ResNet-101, COCO-stuff) instance-segmentation head.

    Same structure as the Swin-small test above: forward a fixture image and
    compare 3x3 logit slices against hard-coded reference values.

    NOTE(review): locals are obfuscated (``UpperCAmelCase_`` / ``_snake_case``);
    restore original names before executing.
    """
    UpperCAmelCase_ = (
        MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
        .to(_snake_case)
        .eval()
    )
    UpperCAmelCase_ = self.default_image_processor
    UpperCAmelCase_ = prepare_img()
    UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
    UpperCAmelCase_ = inputs['''pixel_values'''].shape
    # check size is divisible by 32
    self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
    # check size
    self.assertEqual(_snake_case , (1, 3, 800, 1088))
    with torch.no_grad():
        UpperCAmelCase_ = model(**_snake_case)
    # masks_queries_logits: per-query mask logits at 1/4 resolution.
    UpperCAmelCase_ = outputs.masks_queries_logits
    self.assertEqual(
        masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
    UpperCAmelCase_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
    UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
    self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
    # class_queries_logits: per-query class scores incl. the "no object" class (+1).
    UpperCAmelCase_ = outputs.class_queries_logits
    self.assertEqual(
        class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
    UpperCAmelCase_ = torch.tensor(
        [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]).to(_snake_case)
    self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))
def lowerCamelCase ( self : Tuple):
    """Integration test: training-style forward with segmentation maps yields a loss.

    Builds a batch of two all-zero images plus all-zero segmentation maps, moves
    pixel values / mask labels / class labels to the device, and checks the model
    returns a non-None loss.

    NOTE(review): locals are obfuscated (``UpperCAmelCase_`` / ``_snake_case``);
    restore original names before executing.
    """
    UpperCAmelCase_ = (
        MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
        .to(_snake_case)
        .eval()
    )
    UpperCAmelCase_ = self.default_image_processor
    # Two dummy 800x1333 images with matching 384x384 float segmentation maps.
    UpperCAmelCase_ = image_processor(
        [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='''pt''' , )
    UpperCAmelCase_ = inputs['''pixel_values'''].to(_snake_case)
    UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''mask_labels''']]
    UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''class_labels''']]
    with torch.no_grad():
        UpperCAmelCase_ = model(**_snake_case)
    # Supplying labels must produce a loss.
    self.assertTrue(outputs.loss is not None)
| 51
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optionally `bias`) into `torch_layer` as nn.Parameters.

    Args:
        torch_layer: a torch module exposing ``weight`` (and ``bias``) attributes.
        weight: tensor with exactly the layer's weight shape.
        bias: optional tensor with exactly the layer's bias shape.

    Raises:
        AssertionError: if a shape does not match the target layer.
    """
    # The original obfuscated code dropped the attribute targets, so the
    # parameters were never written back; restore the assignments.
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Copy trax LSH-attention weights into a torch attention layer.

    Args:
        weights: trax triple ``(query_key, value, output_dense)`` of per-head arrays.
        torch_layer: torch attention block with ``self_attention.query_key``,
            ``self_attention.value`` and ``output.dense`` sub-layers.
        hidden_size: model hidden size used to flatten the per-head matrices.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    # trax stores (heads, in, head_dim); flatten heads into the hidden dim.
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    # Output projection is stored transposed relative to torch's Linear.
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Copy trax local-attention weights into a torch attention layer.

    Args:
        weights: trax quadruple ``(query, key, value, output_dense)`` of per-head arrays.
        torch_layer: torch attention block with ``self_attention.query/key/value``
            and ``output.dense`` sub-layers.
        hidden_size: model hidden size used to flatten the per-head matrices.
    """
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    # Flatten trax's per-head layout into torch's (hidden, hidden) Linear layout.
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    # Output projection is stored transposed relative to torch's Linear.
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy one trax Reformer block (attention + feed-forward) into a torch block.

    Args:
        weights: nested trax weight structure for a single block.
        torch_block: target torch Reformer layer.
        hidden_size: model hidden size, forwarded to the attention copiers.
    """
    # layernorm 1 (attention pre-norm)
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output: LSH attention has 3 weight groups, local has 4.
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward wraps the real weights one level deeper.
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2 (feed-forward pre-norm)
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense (trax stores Linear weights transposed)
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a full trax Reformer checkpoint into a torch ReformerModelWithLMHead.

    Args:
        weights: flat trax weight structure (embeddings, blocks, output head).
        torch_model: target ``ReformerModelWithLMHead``.
        hidden_size: model hidden size, forwarded to the block copier.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # Axial position embeddings are stored as a tuple of per-axis weights.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    # Each torch layer consumes 4 consecutive trax weight groups.
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings (LM head decoder; trax stores the weight transposed)
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a torch Reformer from `config_file`, load trax weights, save a state dict.

    Args:
        trax_model_pkl_path: path to the pickled trax checkpoint (``weights`` key).
        config_file: JSON config describing the Reformer architecture.
        pytorch_dump_path: output path for ``torch.save``-d state dict.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, '''rb''') as f:
        model_weights = pickle.load(f)['''weights''']

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case_ : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 51
| 0
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics ``@property`` but caches the computed value.

    The value is computed once by the wrapped getter and stored on the instance
    under ``__cached_<name>``; subsequent accesses return the cached value.
    Deleting the attribute resets the cache.
    """

    def __get__(self, obj, objtype=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''')
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            # First access: compute and stash on the instance.
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false).

    True values are ``y``, ``yes``, ``t``, ``true``, ``on`` and ``1``; false
    values are ``n``, ``no``, ``f``, ``false``, ``off`` and ``0`` (case-insensitive).

    Raises:
        ValueError: if `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"""invalid truth value {val!r}""")
def snake_case_ ( _lowerCAmelCase : Tuple ) -> str:
if is_torch_fx_proxy(__A ):
return True
if is_torch_available():
import torch
if isinstance(__A , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__A , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__A , (jnp.ndarray, Tracer) ):
return True
return isinstance(__A , np.ndarray )
def snake_case_ ( _lowerCAmelCase : str ) -> Dict:
return isinstance(__A , np.ndarray )
def snake_case_ ( _lowerCAmelCase : Dict ) -> Any:
return _is_numpy(__A )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> int:
import torch
return isinstance(__A , torch.Tensor )
def snake_case_ ( _lowerCAmelCase : Any ) -> List[str]:
return False if not is_torch_available() else _is_torch(__A )
def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> List[str]:
import torch
return isinstance(__A , torch.device )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> Any:
return False if not is_torch_available() else _is_torch_device(__A )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> Tuple:
import torch
if isinstance(__A , __A ):
if hasattr(__A , __A ):
UpperCAmelCase : List[str] = getattr(__A , __A )
else:
return False
return isinstance(__A , torch.dtype )
def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> Tuple:
return False if not is_torch_available() else _is_torch_dtype(__A )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
import tensorflow as tf
return isinstance(__A , tf.Tensor )
def snake_case_ ( _lowerCAmelCase : Tuple ) -> int:
return False if not is_tf_available() else _is_tensorflow(__A )
def snake_case_ ( _lowerCAmelCase : Tuple ) -> int:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__A , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(__A )
return type(__A ) == tf.Tensor
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(__A )
def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> str:
import jax.numpy as jnp # noqa: F811
return isinstance(__A , jnp.ndarray )
def snake_case_ ( _lowerCAmelCase : int ) -> Optional[Any]:
return False if not is_flax_available() else _is_jax(__A )
def to_py_obj(obj):
    """Recursively convert tensors / numpy values to plain Python lists and scalars.

    Dicts and lists/tuples are converted element-wise; framework tensors are
    detached and ``tolist()``-ed; anything else is returned unchanged.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Recursively convert tensors / sequences to numpy arrays.

    Dicts are converted value-wise; lists/tuples become arrays; framework
    tensors are detached and converted; anything else is returned unchanged.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """Base class for model outputs, usable as a dataclass.

    Behaves like an ordered dict keyed by field name (skipping ``None`` values)
    and like a tuple when indexed by int/slice. The mutating dict methods
    ``__delitem__``, ``setdefault``, ``pop`` and ``update`` are disabled so the
    output stays consistent with its attributes.
    """

    def __post_init__(self):
        # Populate the underlying dict from the dataclass fields.
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""")

    def pop(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""")

    def update(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""")

    def __getitem__(self, k):
        # String keys behave like a dict; int/slice keys behave like a tuple.
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Return a tuple of all non-``None`` attribute values, in field order."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        # Invoked by the Enum machinery when `value` matches no member; list the
        # valid values in the error. (`_value2member_map_` is the stdlib Enum
        # value->member map; the obfuscated code misspelled it.)
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""")
class PaddingStrategy(str, Enum):
    """Possible values for the ``padding`` argument of tokenizer methods.

    Useful for tab-completion in an IDE. Inherits ``(str, Enum)`` so members
    compare equal to their string values (the obfuscated attribute names all
    collided; restored from the member values).
    """

    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class TensorType(str, Enum):
    """Possible values for the ``return_tensors`` argument of tokenizer methods.

    Inherits ``(str, Enum)`` so members compare equal to their string values
    (the obfuscated attribute names all collided; restored from the values).
    """

    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class ContextManagers:
    """Wrapper around ``contextlib.ExitStack`` that enters a whole list of
    context managers on ``__enter__`` and unwinds them (in reverse order) on
    ``__exit__``.
    """

    def __init__(self, context_managers: List[ContextManager]):
        # The obfuscated code dropped the attribute targets; restore them.
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Return True if `model_class` can compute a loss by default.

    Inspects the forward signature (``call`` for TF, ``forward`` for torch,
    ``__call__`` otherwise) for a ``return_loss`` parameter defaulting to True.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    """Return the list of label-argument names in `model_class`'s forward signature.

    QuestionAnswering models additionally accept ``start_positions`` /
    ``end_positions`` as labels.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict with `delimiter`-joined keys.

    The inner generator recurses through :func:`flatten_dict` itself, so the
    public name must match (the obfuscated rename broke the recursion).
    """

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    """Yield `working_dir`, or a fresh temporary directory when `use_temp_dir` is True.

    The temporary directory is cleaned up when the context exits.
    """
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic ``transpose`` for numpy / torch / TF / JAX tensors.

    Raises:
        ValueError: if `array`'s type is not a supported tensor type.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        # torch uses .T for the default and permute() for explicit axes.
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        # jnp is imported at module level when flax is available.
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array)}.""")
def reshape(array, newshape):
    """Framework-agnostic ``reshape`` for numpy / torch / TF / JAX tensors.

    Raises:
        ValueError: if `array`'s type is not a supported tensor type.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        # torch's reshape takes the shape unpacked.
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array)}.""")
def squeeze(array, axis=None):
    """Framework-agnostic ``squeeze`` for numpy / torch / TF / JAX tensors.

    Raises:
        ValueError: if `array`'s type is not a supported tensor type.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        # torch's squeeze names the axis argument ``dim``.
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array)}.""")
def expand_dims(array, axis):
    """Framework-agnostic ``expand_dims`` for numpy / torch / TF / JAX tensors.

    Raises:
        ValueError: if `array`'s type is not a supported tensor type.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        # torch spells this ``unsqueeze`` with a ``dim`` argument.
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array)}.""")
def tensor_size(array):
    """Framework-agnostic element count for numpy / torch / TF / JAX tensors.

    Raises:
        ValueError: if `array`'s type is not a supported tensor type.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        # Fixed copy-pasted message (originally said "expand_dims").
        raise ValueError(f"""Type not supported for tensor_size: {type(array)}.""")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix every entry of `auto_map` with ``repo_id--`` (in place) and return it.

    Entries that are already repo-qualified (contain ``--``) or are ``None``
    are left untouched; list/tuple values are rewritten element-wise.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            # The obfuscated code dropped the write-back; restore it.
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map
def infer_framework(model_class):
    """Infer the framework ("tf", "pt" or "flax") of `model_class` from its MRO.

    Walks the method resolution order checking each base class's module prefix
    and name, so the check works for subclasses of the base model classes.

    Raises:
        TypeError: if no base class matches any framework.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''') or module.startswith('''keras''') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''') or module.startswith('''jax''') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"""Could not infer framework from class {model_class}.""")
| 23
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __snake_case ( a , a , a , unittest.TestCase ):
    # Pipeline test suite for StableDiffusionControlNetImg2Img (single ControlNet).
    # NOTE(review): obfuscation collapsed distinct mixin-class attributes onto
    # ``UpperCAmelCase__`` and base classes onto ``a``; these were originally the
    # pipeline-tester configuration attributes (pipeline_class, params, batch
    # params, image params). Confirm against the upstream test before running.
    UpperCAmelCase__ : List[Any] = StableDiffusionControlNetImgaImgPipeline
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCAmelCase__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
    UpperCAmelCase__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def lowerCamelCase ( self : int):
        """Build a tiny, seeded component set (unet, controlnet, scheduler, vae,
        text encoder, tokenizer) so pipeline runs are fast and deterministic.

        NOTE(review): locals are obfuscated to ``UpperCAmelCase_``; the dict at
        the end references the original names (unet, controlnet, ...).
        """
        torch.manual_seed(0)
        UpperCAmelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0)
        UpperCAmelCase_ = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
        torch.manual_seed(0)
        UpperCAmelCase_ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        UpperCAmelCase_ = CLIPTextModel(_snake_case)
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        UpperCAmelCase_ = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any , _snake_case : Dict=0):
        """Build deterministic dummy inputs (prompt, seeded generator, 64x64 image
        and control image) for a single pipeline call.

        NOTE(review): the two parameters share the obfuscated name ``_snake_case``
        (a SyntaxError as written) -- originally device and seed; restore before use.
        """
        # mps does not support device-bound generators, so seed the global RNG there.
        if str(_snake_case).startswith('''mps'''):
            UpperCAmelCase_ = torch.manual_seed(_snake_case)
        else:
            UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , )
        UpperCAmelCase_ = floats_tensor(control_image.shape , rng=random.Random(_snake_case)).to(_snake_case)
        UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64))
        UpperCAmelCase_ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def lowerCamelCase ( self : Any):
        """Attention-slicing forward pass must match the regular pass within 2e-3."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCamelCase ( self : Any):
        """xFormers attention forward pass must match the regular pass within 2e-3."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def lowerCamelCase ( self : Optional[Any]):
        """Batched single inference must match unbatched results within 2e-3."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : str = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : str = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def lowerCamelCase ( self : str):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0)
def init_weights(_snake_case : Optional[int]):
if isinstance(_snake_case , torch.nn.Convad):
torch.nn.init.normal(m.weight)
m.bias.data.fill_(1.0)
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case)
torch.manual_seed(0)
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(_snake_case)
torch.manual_seed(0)
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = MultiControlNetModel([controlneta, controlneta])
UpperCAmelCase_ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase ( self : int , _snake_case : Union[str, Any] , _snake_case : str=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = 2
UpperCAmelCase_ = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
]
UpperCAmelCase_ = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case)).to(_snake_case)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64))
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
    def lowerCamelCase ( self : Optional[Any]):
        """Check that different ``control_guidance_start``/``end`` windows change the output.

        NOTE(review): mangled identifiers — ``_snake_case`` (pipeline kwargs/device),
        ``pipe``/``steps``/``scale`` read names that were renamed to ``UpperCAmelCase_``,
        and every result is bound to ``output_a``, so the final asserts compare an
        array with itself; the originals were ``output_1`` … ``output_4``.
        """
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**_snake_case)
        pipe.to(_snake_case)
        # Shared guidance scale and step count for all four runs.
        UpperCAmelCase_ = 1_0.0
        UpperCAmelCase_ = 4
        # Run 1: no guidance window (baseline).
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case)[0]
        # Run 2: scalar start/end window.
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
        # Run 3: per-controlnet lists for start and end.
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
        # Run 4: scalar start, per-controlnet end list.
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self : int):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
def lowerCamelCase ( self : int):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def lowerCamelCase ( self : Optional[int]):
        """Verify ``save_pretrained`` raises NotImplementedError for Multi-ControlNet.

        NOTE(review): ``_snake_case`` is not defined in this method — the pipeline
        kwargs, ``disable`` flag and temp-dir argument were mangled by an automated
        rename (originally ``components``, ``False``/``True``, ``tmpdir``).
        """
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**_snake_case)
        pipe.to(_snake_case)
        pipe.set_progress_bar_config(disable=_snake_case)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(_snake_case)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration test: canny-ControlNet img2img against a reference output."""

    def lowerCamelCase ( self : Optional[int]):
        """Release CUDA memory between tests (model CPU offload leaves allocations behind)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase ( self : Optional[int]):
        """End-to-end canny img2img run compared to a stored numpy reference.

        NOTE(review): ``_snake_case`` occurrences below are mangled argument names
        (safety_checker=None, controlnet, disable flag, prompt/image/control
        image/generator) — confirm against the upstream diffusers test.
        """
        UpperCAmelCase_ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        UpperCAmelCase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case , controlnet=_snake_case)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=_snake_case)
        # Fixed CPU seed so the generated image is reproducible.
        UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
        UpperCAmelCase_ = '''evil space-punk bird'''
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''').resize((512, 512))
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''').resize((512, 512))
        UpperCAmelCase_ = pipe(
            _snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        # Compare against the committed reference rendering within a loose tolerance.
        UpperCAmelCase_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''')
        assert np.abs(expected_image - image).max() < 9e-2
| 51
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the RemBERT package: maps submodule name -> exported symbols.
_UpperCAmelCase : List[Any] = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

# NOTE(review): each branch below should extend the import structure
# (e.g. ``_import_structure["tokenization_rembert"] = [...]``); the automated
# rename turned those updates into throwaway variable assignments.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Slow (sentencepiece) tokenizer is only exported when sentencepiece is installed.
    _UpperCAmelCase : int = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer requires the `tokenizers` library.
    _UpperCAmelCase : List[Any] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch model classes.
    _UpperCAmelCase : Optional[int] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow model classes.
    _UpperCAmelCase : int = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]
# Under static type checking, import the real symbols; at runtime, replace this
# module with a lazy proxy that imports submodules on first attribute access.
if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: the lazy module must *replace* this package module in sys.modules;
    # binding it to a throwaway variable leaves every attribute unresolvable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
def A (__A : bool , __A : bool ) -> Optional[Any]:
"""simple docstring"""
def run_func(__A : Optional[Any] ):
@wraps(__A )
def run_in_eager_mode(*__A : Dict , **__A : List[Any] ):
return func(*__A , **__A )
@wraps(__A )
@tf.function(experimental_compile=__A )
def run_in_graph_mode(*__A : Optional[Any] , **__A : Any ):
return func(*__A , **__A )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def A (batch_size : int , sequence_length : int , vocab_size : int ) -> ["tf.Tensor"]:
    """Create a random ``(batch_size, sequence_length)`` int32 tensor of token ids.

    BUG FIX: all three parameters were named ``__A`` (duplicate argument —
    SyntaxError); the body reads batch_size/sequence_length/vocab_size.
    The mangled dtype ``tf.intaa`` is restored to ``tf.int32``.
    """
    rng = random.Random()
    # One id per (batch, position) cell, uniform over the vocabulary.
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class __snake_case ( a ):
    """TensorFlow benchmark backend: measures speed and memory of inference/training.

    NOTE(review): automated renaming broke this class — most multi-argument
    methods declare several parameters all named ``_snake_case`` (duplicate
    argument is a SyntaxError), and locals are bound to ``UpperCAmelCase_`` while
    later lines read the intended names (``strategy``, ``_inference``, ``_train``,
    ``config``, ``model``, ``memory`` …).  Restore names before running.
    """

    UpperCAmelCase__ : TensorFlowBenchmarkArguments
    UpperCAmelCase__ : PretrainedConfig
    UpperCAmelCase__ : str = "TensorFlow"

    @property
    def lowerCamelCase ( self : List[str]):
        """Installed TensorFlow version string."""
        return tf.__version__

    def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure inference speed for (model_name, batch_size, sequence_length)."""
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
        return self._measure_speed(_inference)

    def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure training speed for (model_name, batch_size, sequence_length)."""
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
        return self._measure_speed(_train)

    def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure inference memory; enables GPU memory growth first so usage is observable."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
        return self._measure_memory(_inference)

    def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure training memory; enables GPU memory growth first so usage is observable."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
        return self._measure_memory(_train)

    def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : int):
        """Build and return the zero-arg inference callable for the requested model."""
        UpperCAmelCase_ = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        # Prefer the concrete architecture class from the config when available.
        UpperCAmelCase_ = (
            hasattr(_snake_case , '''architectures''')
            and isinstance(config.architectures , _snake_case)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                UpperCAmelCase_ = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
                UpperCAmelCase_ = getattr(_snake_case , _snake_case)
                UpperCAmelCase_ = model_cls(_snake_case)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](_snake_case)
        # encoder-decoder has vocab size saved differently
        UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
        UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_forward():
            return model(_snake_case , decoder_input_ids=_snake_case , training=_snake_case)

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_forward():
            return model(_snake_case , training=_snake_case)

        UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
        """Build and return the zero-arg training-step callable (forward + gradients)."""
        UpperCAmelCase_ = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
        if self.args.fpaa:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        UpperCAmelCase_ = (
            hasattr(_snake_case , '''architectures''')
            and isinstance(config.architectures , _snake_case)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                UpperCAmelCase_ = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
                UpperCAmelCase_ = getattr(_snake_case , _snake_case)
                UpperCAmelCase_ = model_cls(_snake_case)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_snake_case)
        # encoder-decoder has vocab size saved differently
        UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
        UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_train():
            UpperCAmelCase_ = model(_snake_case , decoder_input_ids=_snake_case , labels=_snake_case , training=_snake_case)[0]
            UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_train():
            UpperCAmelCase_ = model(_snake_case , labels=_snake_case , training=_snake_case)[0]
            UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
            return gradients

        UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def lowerCamelCase ( self : Any , _snake_case : Optional[Any]):
        """Time the callable via timeit.repeat, returning min time per single run."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
                    timeit.repeat(_snake_case , repeat=1 , number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                UpperCAmelCase_ = timeit.repeat(
                    _snake_case , repeat=self.args.repeat , number=10 , )
                return min(_snake_case) / 1_0.0
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""")

    def lowerCamelCase ( self : Dict , _snake_case : Callable[[], None]):
        """Run the callable once and report peak memory (nvml on GPU, psutil-style on CPU)."""
        logger.info(
            '''Note that TensorFlow allocates more memory than '''
            '''it might need to speed up computation. '''
            '''The memory reported here corresponds to the memory '''
            '''reported by `nvidia-smi`, which can vary depending '''
            '''on total available memory on the GPU that is used.''')
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
                            ''' consumption line by line.''')
                    UpperCAmelCase_ = start_memory_tracing('''transformers''')
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
                        ''' with `args.memory=False`''')
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            '''py3nvml not installed, we won\'t log GPU memory usage. '''
                            '''Install py3nvml (pip install py3nvml) to log information about GPU.''')
                        UpperCAmelCase_ = '''N/A'''
                    else:
                        logger.info(
                            '''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
                            ''' running on the same GPU.''')
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(_snake_case)
                        UpperCAmelCase_ = meminfo.used
                        UpperCAmelCase_ = Memory(_snake_case)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
                            ''' TensorFlow.''')
                        UpperCAmelCase_ = None
                    else:
                        UpperCAmelCase_ = measure_peak_memory_cpu(_snake_case)
                        UpperCAmelCase_ = Memory(_snake_case) if isinstance(_snake_case , _snake_case) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    UpperCAmelCase_ = stop_memory_tracing(_snake_case)
                    if memory is None:
                        UpperCAmelCase_ = summary.total
                else:
                    UpperCAmelCase_ = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""")
                return "N/A", None
| 51
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Dict =logging.get_logger(__name__)
_lowercase : int ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class snake_case__ (A__ ):
    """ViT model configuration (obfuscated port of ``ViTConfig``).

    BUG FIX: every ``__init__`` parameter was named ``__lowercase`` — duplicate
    argument names are a SyntaxError — and the values were bound to throwaway
    locals instead of instance attributes.  The canonical ViT parameter names
    are restored from the default values and assignment order.
    """

    # Model-type tag used by AutoConfig dispatch.
    __lowerCAmelCase :Dict = '''vit'''

    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , encoder_stride=1_6 , **kwargs , ) -> None:
        """Store the transformer and patch-embedding hyper-parameters."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class snake_case__ (A__ ):
    """ONNX export configuration for ViT: declares input axes and numeric tolerance."""

    # Minimum ONNX opset tooling version required for export.
    __lowerCAmelCase :Optional[Any] = version.parse("1.11" )

    @property
    def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
        """Dynamic-axis specification for the single ``pixel_values`` input."""
        pixel_axes = {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}
        return OrderedDict([("""pixel_values""", pixel_axes)] )

    @property
    def SCREAMING_SNAKE_CASE__( self ) -> int:
        """Absolute tolerance used when validating the exported model's outputs."""
        return 1e-4
| 170
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Import-time fallback so the module still loads without PIL installed;
    # the real tests below are gated by @require_vision anyway.
    class __snake_case :
        @staticmethod
        def lowerCamelCase ( *_snake_case : Optional[int] , **_snake_case : int):
            """No-op placeholder standing in for PIL-dependent helpers."""
            pass
def A (image : "Image" ) -> str:
    """Return the MD5 hex digest of an image's raw bytes (used to fingerprint outputs).

    BUG FIX: ``hashlib.mda`` does not exist — the intended call is ``hashlib.md5``;
    the body also read ``image``/``m`` while the parameter and local had been
    renamed, so those names are restored.  The annotation is quoted so the module
    imports even when PIL's ``Image`` is not bound (it is conditionally imported).
    """
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase ):
    """Pipeline tests for depth estimation (output schema, batch handling, DPT slow test).

    NOTE(review): several methods below declare multiple parameters all named
    ``_snake_case`` — a duplicate argument name is a SyntaxError; the identifiers
    were mangled by an automated rename.
    """

    UpperCAmelCase__ : Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[str]):
        """Build the pipeline under test plus two sample image paths."""
        UpperCAmelCase_ = DepthEstimationPipeline(model=_snake_case , image_processor=_snake_case)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def lowerCamelCase ( self : str , _snake_case : Optional[int] , _snake_case : List[str]):
        """Check output schema for a single image and for a mixed batch of input kinds."""
        UpperCAmelCase_ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)} , _snake_case)
        import datasets
        UpperCAmelCase_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''')
        # Batch mixing PIL image, URL, and RGBA/LA/L dataset files.
        UpperCAmelCase_ = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ])
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ] , _snake_case , )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''')
    def lowerCamelCase ( self : Union[str, Any]):
        """TF backend placeholder — depth estimation is torch-only."""
        pass

    @slow
    @require_torch
    def lowerCamelCase ( self : List[str]):
        """Integration test against Intel/dpt-large with fixed expected depth extrema."""
        UpperCAmelCase_ = '''Intel/dpt-large'''
        UpperCAmelCase_ = pipeline('''depth-estimation''' , model=_snake_case)
        UpperCAmelCase_ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        UpperCAmelCase_ = hashimage(outputs['''depth'''])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()) , 2_9.3_0_4)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()) , 2.6_6_2)

    @require_torch
    def lowerCamelCase ( self : Optional[Any]):
        """Skip the tiny-model smoke test — no tiny GLPN/DPT checkpoint exists."""
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 51
| 0
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class SCREAMING_SNAKE_CASE__ ( ctypes.Structure ):
        """ctypes mirror of the Win32 CONSOLE_CURSOR_INFO struct (size + visibility).

        BUG FIX: ctypes requires the field layout to be declared on ``_fields_``;
        the automated rename had changed it to ``_snake_case``, which ctypes
        ignores, leaving the struct without any fields.
        """

        _fields_ = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def A_ ( ) -> None:
    '''Hide the terminal cursor (Win32 console API on Windows, ANSI escape elsewhere).

    BUG FIX: the original referenced ``ctypes.windll.kernelaa`` (no such DLL — the
    intended library is ``kernel32``), passed an undefined name ``__A`` to the
    console-info calls, and never cleared the ``visible`` flag.  The ``-> Dict``
    annotation referenced an unimported name; the function returns None.
    '''
    if os.name == "nt":
        cursor_info = SCREAMING_SNAKE_CASE__()  # CONSOLE_CURSOR_INFO struct defined above
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )  # -11 == STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25l''' )
        sys.stdout.flush()
def A_ ( ) -> None:
    '''Show the terminal cursor (Win32 console API on Windows, ANSI escape elsewhere).

    BUG FIX: mirror of the hide routine above — ``kernelaa`` restored to
    ``kernel32``, the undefined ``__A`` replaced with proper locals, and the
    ``visible`` flag is actually set to True before writing it back.
    '''
    if os.name == "nt":
        cursor_info = SCREAMING_SNAKE_CASE__()  # CONSOLE_CURSOR_INFO struct defined above
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )  # -11 == STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(cursor_info ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25h''' )
        sys.stdout.flush()
@contextmanager
def A_ ( ) -> Dict:
    '''Context manager that hides the cursor for the duration of the block.

    NOTE(review): ``hide_cursor``/``show_cursor`` are not defined in this module —
    the corresponding defs above were renamed to ``A_`` (and shadow each other),
    so this body raises NameError at runtime; the original names must be
    restored.  The ``-> Dict`` annotation also references an unimported name.
    '''
    try:
        hide_cursor()
        yield
    finally:
        # Always restore cursor visibility, even if the body raised.
        show_cursor()
| 328
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DeBERTa package: maps submodule name -> exported symbols.
snake_case_ : int = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

# NOTE(review): the branches below should extend the import structure
# (e.g. ``_import_structure["tokenization_deberta_fast"] = [...]``); the
# automated rename turned those updates into throwaway variable assignments.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer requires the `tokenizers` library.
    snake_case_ : int = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch model classes.
    snake_case_ : List[str] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow model classes.
    snake_case_ : Any = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
# Under static type checking, import the real symbols; at runtime, replace this
# module with a lazy proxy that imports submodules on first attribute access.
if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # BUG FIX: the lazy module must *replace* this package module in sys.modules;
    # binding it to a throwaway variable leaves every attribute unresolvable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> Optional[Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def snake_case_ ( self ) -> Dict:
        '''Build a BioGptConfig from the tester's stored hyper-parameters.

        NOTE(review): ``is_decoder=_snake_case`` references an undefined name —
        the automated rename destroyed the original literal (presumably a
        boolean); confirm against the upstream BioGPT tester before running.
        '''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = BioGptModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A_ = model(_snake_case , attention_mask=_snake_case )
A_ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Optional[int]:
        '''Check BioGptForCausalLM logits shape on a labeled forward pass.

        NOTE(review): every parameter is named ``UpperCamelCase__`` (duplicate
        argument — SyntaxError) and the body reads undefined ``_snake_case`` /
        ``model`` / ``result`` names; the original argument names must be
        restored before this can run.
        '''
        A_ = BioGptForCausalLM(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        A_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> str:
        '''Check that cached (past_key_values) decoding matches a full forward pass
        when the attention mask hides a mutated token.

        NOTE(review): duplicate ``UpperCamelCase__`` parameters (SyntaxError) and
        locals collapsed onto ``A_`` while later lines read the intended names
        (``model``, ``attn_mask``, ``half_seq_length``, ``past``, ``next_tokens``,
        ``output_from_no_past`` …); restore before running.
        '''
        A_ = BioGptModel(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        # create attention mask
        A_ = torch.ones(input_ids.shape , dtype=torch.long , device=_snake_case )
        A_ = self.seq_length // 2
        A_ = 0
        # first forward pass
        A_ , A_ = model(_snake_case , attention_mask=_snake_case ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        A_ = ids_tensor((1,) , _snake_case ).item() + 1
        A_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        A_ = random_other_next_tokens
        # append to next input_ids and attn_mask
        A_ = torch.cat([input_ids, next_tokens] , dim=-1 )
        A_ = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_snake_case )] , dim=1 , )
        # get two different outputs
        A_ = model(_snake_case , attention_mask=_snake_case )["""last_hidden_state"""]
        A_ = model(_snake_case , past_key_values=_snake_case , attention_mask=_snake_case )["""last_hidden_state"""]
        # select random slice
        A_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        A_ = output_from_no_past[:, -1, random_slice_idx].detach()
        A_ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3 ) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = BioGptModel(config=_snake_case ).to(_snake_case ).eval()
A_ = torch.ones(input_ids.shape , dtype=torch.long , device=_snake_case )
# first forward pass
A_ = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case )
A_ , A_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A_ = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A_ = model(_snake_case , attention_mask=_snake_case )["""last_hidden_state"""]
A_ = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[
"""last_hidden_state"""
]
# select random slice
A_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3 ) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , UpperCamelCase__=False ) -> str:
'''simple docstring'''
A_ = BioGptForCausalLM(_snake_case )
model.to(_snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A_ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case_ ( self , UpperCamelCase__ , *UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = BioGptModel(_snake_case )
A_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.num_labels
A_ = BioGptForTokenClassification(_snake_case )
model.to(_snake_case )
model.eval()
A_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
# NOTE(review): the original base list was three copies of an undefined mangled
# name; the conventional mixins for a HF model test suite are restored below —
# confirm they are imported at the top of this file.
class A__(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Test suite for the BioGPT model family.

    The original class bound every attribute to the same name and every method
    to ``snake_case_`` (so unittest discovered none of them); names are restored
    to the conventional ModelTesterMixin/unittest contract.
    """

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        # NOTE(review): config_class was mangled — BioGptConfig is the obvious
        # candidate; confirm it is imported at the top of the file.
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): the mangled body only kept the loop and a dead
            # assignment; setting position_embedding_type on the config is the
            # conventional pattern — confirm upstream.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_attention_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        """Left-padded batched generation must match per-sentence generation."""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        # shorten max_length by the number of pad tokens the batched call used
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
# Renamed from the mangled ``A__``, which clobbered the test-suite class of the
# same name defined above (only one of the two would have been discoverable).
class BioGptModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained ``microsoft/biogpt`` checkpoint."""

    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        # spot-check the first 3x3 logits against recorded reference values
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 162
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import scaffolding for the GPT-NeoX subpackage: heavy torch/tokenizers
# modules are only imported on first attribute access via ``_LazyModule``.
# The original bound each piece to the same throwaway name and then referenced
# an undefined ``_import_structure`` — a guaranteed NameError; restored to the
# standard Transformers pattern.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # tokenizers not installed: simply omit the fast tokenizer from the API
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch not installed: omit the modeling classes
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; the result must be installed in
    # sys.modules, not bound to a local variable, for the pattern to work.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
"""Hash table with separate chaining: each slot holds a deque of values."""
from collections import deque

from .hash_table import HashTable


class __lowercase(HashTable):
    """HashTable variant that resolves collisions by chaining values in a
    per-slot ``deque`` instead of probing for a new slot.
    """

    def __init__(self, *args, **kwargs):
        # Original signature reused one mangled name for *args and **kwargs
        # (a SyntaxError); the base class is the imported HashTable.
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Lazily create the bucket (the original discarded the new deque into a
        # dead local, so appendleft would crash on an empty slot), then push
        # the new datum at the head of the chain.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # NOTE(review): mirrors the bucket into the parent's key map — confirm
        # ``_keys`` is the attribute the base HashTable maintains.
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        # Average remaining capacity per slot, scaled by the charge factor.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Keep chaining into this slot until it is full AND no empty slot
        # remains; only then defer to the parent's probing strategy.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 318
|
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search for *key* by checking both ends of the current
    window and shrinking it by one element from each side.

    The original defined the function under a mangled name while recursing on
    ``search`` (a NameError on any recursive path) and repeated one mangled
    parameter name (a SyntaxError); both are fixed here, behavior preserved.

    :param list_data: sequence to search
    :param key: value to locate
    :param left: left bound of the current window
    :param right: right bound; 0 (the default) means "whole list"
    :return: index of *key*, or -1 if absent

    >>> search([1, 2, 3, 4, 5], 3)
    2
    >>> search([1, 2, 3], 7)
    -1
    """
    # 0 is treated as "unset" so the default searches the full list
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 51
| 0
|
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCamelCase(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 based text decoder conditioned on a (optionally re-projected)
    prefix embedding, with beam-search caption generation.

    The original ``__init__`` repeated one mangled parameter name nineteen
    times (a SyntaxError) while the body referenced the real names; the
    parameter list is reconstructed from those body references and the visible
    type hints/defaults. NOTE(review): base classes and the ignore-list
    attribute name were also mangled — confirm against the original module.
    """

    # Attention-bias buffers that may be unexpected when loading checkpoints.
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Optional bottleneck: project the prefix into/out of a hidden space.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """Run the LM on [decoded prefix ; token embeddings].

        Returns the transformer output, plus the encoded prefix hidden state
        when a prefix bottleneck is configured.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # pad the label sequence so it lines up with the prepended prefix
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        # zero labels covering the prefix positions (ignored by the LM loss)
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Project a raw prefix embedding into the bottleneck space."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Generate one beam-searched caption per feature row.

        Returns (stacked token tensors, stacked sequence lengths).
        """
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        eos_token_id: Optional[int] = None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
    ):
        """Beam search over the LM, seeded from *input_embeds*.

        Returns (tokens sorted by score, matching sequence lengths).
        """
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(tokens)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                # first step: expand to beam_size candidates
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # finished beams keep their score by forcing a single 0-logprob
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 310
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
snake_case_ : int = logging.get_logger(__name__)
# NOTE(review): this rebinds the same name and clobbers the logger above —
# the second assignment was presumably a separate constant (e.g. a
# pretrained-config archive map); confirm against the original file.
snake_case_ : str = {}
class __snake_case(PretrainedConfig):
    """Configuration for LLaMA models.

    The original ``__init__`` bound every parameter to one mangled name (a
    SyntaxError) while the body used the real names; the parameter list is
    reconstructed from those body references and the visible defaults.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the optional ``rope_scaling`` dict: it must have exactly a
        ``type`` in {linear, dynamic} and a float ``factor`` > 1.

        Raises ValueError on any malformed configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 51
| 0
|
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __lowerCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
    """Fast (tiny-model) tests for the single-ControlNet Stable Diffusion
    image-to-image pipeline, built from small randomly-seeded components.

    NOTE(review): the base list repeats the same name three times, which
    raises a duplicate-base TypeError at class creation — these were
    presumably the three distinct pipeline tester mixins imported above.
    ``_snake_case`` below is referenced but never defined in this scope
    (mangled flag/device/seed values); confirm against the original test
    module. The five class attributes all bind the same name ``a_`` so only
    the last survives.
    """
    a_ : List[Any] = StableDiffusionControlNetImgaImgPipeline
    a_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    a_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    a_ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
    a_ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def lowerCamelCase ( self : int ):
        # Build the dict of tiny, deterministically-seeded pipeline components
        # (UNet, ControlNet, scheduler, VAE, text encoder, tokenizer).
        torch.manual_seed(0 )
        lowerCAmelCase_ : Dict = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        lowerCAmelCase_ : List[str] = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        # NOTE(review): clip_sample / set_alpha_to_one receive the undefined
        # ``_snake_case`` — presumably boolean flags in the original.
        lowerCAmelCase_ : List[str] = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
        torch.manual_seed(0 )
        lowerCAmelCase_ : List[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowerCAmelCase_ : Dict = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        lowerCAmelCase_ : Dict = CLIPTextModel(_snake_case )
        lowerCAmelCase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCAmelCase_ : List[str] = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def lowerCamelCase ( self : Union[str, Any] , a_ : Any , a_ : Dict=0 ):
        # Build deterministic call kwargs: a seeded generator, a random
        # control image, and an RGB PIL input image derived from it.
        if str(_snake_case ).startswith("mps" ):
            # MPS does not support device-bound generators
            lowerCAmelCase_ : int = torch.manual_seed(_snake_case )
        else:
            lowerCAmelCase_ : str = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        lowerCAmelCase_ : Any = 2
        lowerCAmelCase_ : Optional[Any] = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , )
        lowerCAmelCase_ : Dict = floats_tensor(control_image.shape , rng=random.Random(_snake_case ) ).to(_snake_case )
        lowerCAmelCase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
        lowerCAmelCase_ : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def lowerCamelCase ( self : Any ):
        # Attention slicing must not change outputs beyond tolerance.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def lowerCamelCase ( self : Any ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def lowerCamelCase ( self : Optional[Any] ):
        # A batch of identical inputs must produce identical outputs.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class __lowerCamelCase ( A__ , A__ , unittest.TestCase ):
    """Fast (tiny-model) tests for the Stable Diffusion img2img pipeline with
    a MultiControlNetModel (two ControlNets).

    NOTE(review): this class reuses both the mangled class name of the
    previous test class (clobbering it at module scope) and duplicate mangled
    base names (a TypeError at class creation). ``_snake_case`` is referenced
    throughout but never defined in this scope; the attributes all bind the
    same name ``a_`` so only the last survives. Confirm all of these against
    the original test module.
    """
    a_ : str = StableDiffusionControlNetImgaImgPipeline
    a_ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    a_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    a_ : str = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def lowerCamelCase ( self : str ):
        # Build tiny seeded components; two ControlNets with normal-initialized
        # down-block projections are wrapped in a MultiControlNetModel.
        torch.manual_seed(0 )
        lowerCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(a_ : Optional[int] ):
            # NOTE(review): ``torch.nn.Convad`` / ``torch.nn.init.normal`` look
            # like mangled Conv2d / normal_ — confirm upstream.
            if isinstance(_snake_case , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )

        lowerCAmelCase_ : List[str] = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(_snake_case )
        torch.manual_seed(0 )
        lowerCAmelCase_ : str = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(_snake_case )
        torch.manual_seed(0 )
        lowerCAmelCase_ : Dict = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
        torch.manual_seed(0 )
        lowerCAmelCase_ : Optional[int] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowerCAmelCase_ : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        lowerCAmelCase_ : Optional[int] = CLIPTextModel(_snake_case )
        lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCAmelCase_ : List[Any] = MultiControlNetModel([controlneta, controlneta] )
        lowerCAmelCase_ : List[str] = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def lowerCamelCase ( self : int , a_ : Union[str, Any] , a_ : str=0 ):
        # Deterministic call kwargs: one control image per ControlNet.
        if str(_snake_case ).startswith("mps" ):
            lowerCAmelCase_ : Tuple = torch.manual_seed(_snake_case )
        else:
            lowerCAmelCase_ : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        lowerCAmelCase_ : Union[str, Any] = 2
        lowerCAmelCase_ : List[Any] = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case ) , ),
        ]
        lowerCAmelCase_ : Dict = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case ) ).to(_snake_case )
        lowerCAmelCase_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase_ : Any = Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
        lowerCAmelCase_ : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def lowerCamelCase ( self : Optional[Any] ):
        # Varying control_guidance_start/end (scalar and per-net lists) must
        # actually change the output image.
        lowerCAmelCase_ : Tuple = self.get_dummy_components()
        lowerCAmelCase_ : Tuple = self.pipeline_class(**_snake_case )
        pipe.to(_snake_case )
        lowerCAmelCase_ : Optional[int] = 10.0
        lowerCAmelCase_ : Optional[Any] = 4
        lowerCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase_ : Optional[Any] = steps
        lowerCAmelCase_ : Any = scale
        lowerCAmelCase_ : int = pipe(**_snake_case )[0]
        lowerCAmelCase_ : Dict = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase_ : List[Any] = steps
        lowerCAmelCase_ : List[Any] = scale
        lowerCAmelCase_ : Dict = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        lowerCAmelCase_ : Dict = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase_ : Dict = steps
        lowerCAmelCase_ : Optional[int] = scale
        lowerCAmelCase_ : List[Any] = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        lowerCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
        lowerCAmelCase_ : int = steps
        lowerCAmelCase_ : Tuple = scale
        lowerCAmelCase_ : str = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3

    def lowerCamelCase ( self : Dict ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def lowerCamelCase ( self : int ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def lowerCamelCase ( self : int ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )

    def lowerCamelCase ( self : Optional[int] ):
        # save_pretrained is not implemented for Multi-ControlNet: tolerate
        # the NotImplementedError rather than fail.
        lowerCAmelCase_ : List[str] = self.get_dummy_components()
        lowerCAmelCase_ : List[str] = self.pipeline_class(**_snake_case )
        pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(_snake_case )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
    """Slow, GPU-only integration test for canny-ControlNet img2img on real checkpoints."""

    def tearDown(self):
        # Restored name `tearDown` (it calls super().tearDown(), so it is the
        # unittest teardown hook); frees GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        """End-to-end run compared against a stored reference image (max abs diff < 9e-2)."""
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 241
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module logger.
snake_case_ : List[str] = logging.get_logger(__name__)
# Canonical CodeGen checkpoints mapped to their hosted config files
# (pretrained config archive map).
snake_case_ : Tuple = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __snake_case ( PretrainedConfig ):
    """Configuration for CodeGen models.

    Stores the hyper-parameters of a CodeGen checkpoint (vocabulary size,
    depth, width, rotary-embedding size, dropout rates, ...).

    NOTE(review): the previous revision subclassed the undefined name `a`,
    declared every __init__ parameter with one duplicated name (a
    SyntaxError), stored the values in locals instead of on `self`, and bound
    both `model_type` and `attribute_map` to the same class attribute; all of
    that is restored here.
    """

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class __snake_case ( OnnxConfigWithPast ):
    """ONNX export configuration for CodeGen.

    Describes the exported model inputs (including past key/values when
    exporting with a cache) and builds matching dummy inputs for tracing.

    NOTE(review): the previous revision gave all five methods the same mangled
    name (so each shadowed the last) while the body reads
    `self.num_attention_heads` / `self.num_layers`; the property names the
    base class and the body require are restored here.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the exported model inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (optionally with zeroed past key/values) for export."""
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            # Extend the mask to cover the (zeroed) past positions as well.
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 51
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image into a float tensor in [-1, 1], NCHW layout, with
    both sides rounded down to multiples of 32 (required by the UNet).

    NOTE(review): the sole visible caller invokes ``preprocess(...)`` while
    this def carried a mangled name; the old name is kept as an alias below.
    The previous body also read the undefined names ``w``/``h`` and the
    nonexistent ``np.floataa``.
    """
    w, h = image.size
    # Round both sides down to the nearest multiple of 32.
    w, h = (x - x % 32 for x in (w, h))
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch dim
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0  # [0, 1] -> [-1, 1]


# Backward-compatible alias for the previous (mangled) public name.
lowerCAmelCase = preprocess
class __A ( DiffusionPipeline ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet).

    NOTE(review): the previous revision subclassed the undefined name `A`,
    declared duplicate parameter names (a SyntaxError) and referenced the
    undefined `_snake_case` throughout; names are restored from the
    surrounding usage.
    """

    def __init__(self, vqvae, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        """Upscale ``image`` by iterative denoising in the VQ-VAE latent space.

        Accepts a PIL image or an NCHW tensor; returns an
        ``ImagePipelineOutput`` (or a 1-tuple when ``return_dict`` is False).
        """
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 211
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for PhoBERT's BPE tokenizer.

    NOTE(review): the previous revision subclassed the undefined name `a`,
    bound both mixin hook attributes to one mangled class attribute, named
    every method `lowerCamelCase` (so setUp never ran and the test was never
    collected), and stored fixture paths in locals instead of on `self`.
    """

    # Hook attributes read by TokenizerTesterMixin.
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny vocab/merges fixture pair into `tmpdirname`."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the fixture vocab."""
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 51
| 0
|
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files" , [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ] , )
def _snake_case ( files , tmp_path_factory ):
    """DatasetInfosDict.from_directory must read the size from a README.md
    YAML block and/or the legacy dataset_infos.json, whichever is present.

    NOTE(review): the previous revision declared both parameters with one
    duplicated name (a SyntaxError) while the body read `files` and
    `tmp_path_factory`; those names are restored.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info" , [
        DatasetInfo(),
        DatasetInfo(
            description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ),
    ] , )
def _snake_case ( tmp_path , dataset_info ):
    """A DatasetInfo written to disk must round-trip through from_directory.

    NOTE(review): parameters were previously one duplicated name (a
    SyntaxError) and the body read the undefined `__A`; restored.
    """
    tmp_path_str = str(tmp_path)
    dataset_info.write_to_directory(tmp_path_str)
    reloaded = DatasetInfo.from_directory(tmp_path_str)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path_str, "dataset_info.json"))
def _snake_case ( ):
    """_to_yaml_dict must keep exactly the whitelisted fields, with
    YAML-serializable values, and survive a safe_dump/safe_load round trip.

    NOTE(review): the previous revision read the undefined `__A` in place of
    the locals it had just (mangled-)assigned; names restored.
    """
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def _snake_case ( ):
    """An empty DatasetInfo must serialize to an empty YAML dict.

    The previous revision assigned to mangled locals and then read the
    undefined names `dataset_info` / `dataset_info_yaml_dict`; restored.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict" , [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()} ),
        DatasetInfosDict({"my_config_name": DatasetInfo()} ),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , )
            } ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=4_2 ),
                "v2": DatasetInfo(dataset_size=1_3_3_7 ),
            } ),
    ] , )
def _snake_case ( tmp_path , dataset_infos_dict ):
    """DatasetInfosDict must round-trip through write_to_directory/from_directory.

    NOTE(review): parameters were previously one duplicated name (a
    SyntaxError) and the body read the undefined `__A`; restored.
    """
    tmp_path_str = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path_str)
    reloaded = DatasetInfosDict.from_directory(tmp_path_str)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path_str, "README.md"))
| 18
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
snake_case_ : Any = logging.get_logger(__name__)
# NOTE(review): the next assignment rebinds the same (mangled) name, clobbering
# the logger above — these were presumably two distinct names
# (a module logger and a `DatasetType` TypeVar); confirm upstream.
snake_case_ : Optional[int] = TypeVar("DatasetType", Dataset, IterableDataset)
def A (
    datasets,
    probabilities=None,
    seed=None,
    info=None,
    split=None,
    stopping_strategy="first_exhausted",
):
    """Interleave several map-style or iterable datasets into one.

    All datasets must be of the same kind (all ``Dataset`` or all
    ``IterableDataset``); dispatches to the matching interleave helper.

    NOTE(review): the previous revision declared all six parameters with the
    duplicated name ``__A`` (a SyntaxError); names are restored from the
    helper calls and error messages.

    Raises:
        ValueError: on an empty list, mixed/unsupported dataset types, or an
            invalid ``stopping_strategy``.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def A (
    dsets,
    info=None,
    split=None,
    axis=0,
):
    """Concatenate a list of map-style or iterable datasets along ``axis``.

    All datasets must be of the same kind (all ``Dataset`` or all
    ``IterableDataset``); dispatches to the matching concatenation helper.

    NOTE(review): the previous revision declared all four parameters with the
    duplicated name ``__A`` (a SyntaxError); names restored from the helper
    calls.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 51
| 0
|
"""simple docstring"""
def _snake_case ( lowercase__ : int = 1_0_0_0_0_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = set(range(3 , __A , 2 ) )
primes.add(2 )
for p in range(3 , __A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , __A , __A ) ) )
lowerCAmelCase_ :Union[str, Any] = [float(__A ) for n in range(limit + 1 )]
for p in primes:
for n in range(__A , limit + 1 , __A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Select the tensor framework used for `return_tensors`, preferring PyTorch,
# then TensorFlow, falling back to JAX.
# NOTE(review): restored the name `FRAMEWORK` — the tests below read
# `FRAMEWORK` while the previous revision assigned a mangled name.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = ByTaTokenizer
UpperCAmelCase__ : int = False
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return ByTaTokenizer.from_pretrained('''google/byt5-small''')
    def lowerCamelCase ( self : List[str] , **_snake_case : Union[str, Any]):
        """Load a tokenizer from the fixture directory written in setUp.

        NOTE(review): this matches the tester-mixin `get_tokenizer` hook, and
        `self.tokenizer_class` is presumably the (mangled) class attribute
        above — confirm both names upstream.
        """
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Dict , _snake_case : int , _snake_case : Tuple=False , _snake_case : Dict=20 , _snake_case : Optional[Any]=5):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(_snake_case)):
try:
UpperCAmelCase_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_snake_case)
except UnicodeDecodeError:
pass
toks.append((i, tok))
UpperCAmelCase_ = list(filter(lambda _snake_case: re.match(r'''^[ a-zA-Z]+$''' , t[1]) , _snake_case))
UpperCAmelCase_ = list(filter(lambda _snake_case: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_snake_case) , _snake_case))
if max_length is not None and len(_snake_case) > max_length:
UpperCAmelCase_ = toks[:max_length]
if min_length is not None and len(_snake_case) < min_length and len(_snake_case) > 0:
while len(_snake_case) < min_length:
UpperCAmelCase_ = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase_ = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase_ = tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case)
if " " not in output_txt and len(_snake_case) > 1:
UpperCAmelCase_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_snake_case)
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_snake_case)
)
if with_prefix_space:
UpperCAmelCase_ = ''' ''' + output_txt
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
return output_txt, output_ids
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
UpperCAmelCase_ = tokenizer(['''hi''', '''I went to the gym''', ''''''])
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''])
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = '''Unicode €.'''
UpperCAmelCase_ = tokenizer(_snake_case)
UpperCAmelCase_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , _snake_case)
# decoding
UpperCAmelCase_ = tokenizer.decode(_snake_case)
self.assertEqual(_snake_case , '''Unicode €.</s>''')
UpperCAmelCase_ = tokenizer('''e è é ê ë''')
UpperCAmelCase_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , _snake_case)
# decoding
UpperCAmelCase_ = tokenizer.decode(_snake_case)
self.assertEqual(_snake_case , '''e è é ê ë</s>''')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''e è é ê ë</s>''')
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
if FRAMEWORK != "jax":
UpperCAmelCase_ = list(batch.input_ids.numpy()[0])
else:
UpperCAmelCase_ = list(batch.input_ids.tolist()[0])
self.assertListEqual(_snake_case , _snake_case)
self.assertEqual((2, 37) , batch.input_ids.shape)
self.assertEqual((2, 37) , batch.attention_mask.shape)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , _snake_case)
self.assertIn('''attention_mask''' , _snake_case)
self.assertNotIn('''decoder_input_ids''' , _snake_case)
self.assertNotIn('''decoder_attention_mask''' , _snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCAmelCase_ = tokenizer(
text_target=_snake_case , max_length=32 , padding='''max_length''' , truncation=_snake_case , return_tensors=_snake_case)
self.assertEqual(32 , targets['''input_ids'''].shape[1])
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.ta_base_tokenizer
UpperCAmelCase_ = ['''A long paragraph for summarization. </s>''']
UpperCAmelCase_ = ['''Summary of the text. </s>''']
# fmt: off
UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
UpperCAmelCase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
UpperCAmelCase_ = tokenizer(_snake_case , text_target=_snake_case)
self.assertEqual(_snake_case , batch['''input_ids'''][0])
self.assertEqual(_snake_case , batch['''labels'''][0])
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
shutil.rmtree(_snake_case)
UpperCAmelCase_ = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''])
UpperCAmelCase_ = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''')
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case)
with open(os.path.join(_snake_case , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
UpperCAmelCase_ = json.load(_snake_case)
with open(os.path.join(_snake_case , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
UpperCAmelCase_ = json.load(_snake_case)
UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(125)]
UpperCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCAmelCase_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(_snake_case , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(_snake_case , _snake_case)
with open(os.path.join(_snake_case , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
json.dump(_snake_case , _snake_case)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ = tokenizer_class.from_pretrained(
_snake_case , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_snake_case)]
UpperCAmelCase_ = tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )
def lowerCamelCase ( self : Any):
    """Round-trip every available tokenizer through save_pretrained /
    from_pretrained and check that decoding the single byte id 255 yields
    the empty string (it maps to no printable character)."""
    # Original assigned [] to a throwaway name and then appended to the
    # undefined `tokenizer_list`; bind the list to the name actually used.
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            tokenizer = tokenizer_class.from_pretrained(tmp_dir)
            self.assertTrue(tokenizer.decode([255]) == '''''')
def lowerCamelCase ( self : int):
    """Deliberate no-op override — presumably disables an inherited common test
    that does not apply to this tokenizer (TODO confirm against the base class)."""
    pass
def lowerCamelCase ( self : Optional[int]):
    """Deliberate no-op override — presumably disables an inherited common test
    that does not apply to this tokenizer (TODO confirm against the base class)."""
    pass
def lowerCamelCase ( self : Dict):
    """Deliberate no-op override — presumably disables an inherited common test
    that does not apply to this tokenizer (TODO confirm against the base class)."""
    pass
def lowerCamelCase ( self : List[Any]):
    """Deliberate no-op override — presumably disables an inherited common test
    that does not apply to this tokenizer (TODO confirm against the base class)."""
    pass
def lowerCamelCase ( self : Tuple):
    """Check that convert_tokens_to_string returns a plain ``str`` when fed a
    list of single-character tokens plus the ``</s>`` sentinel."""
    # Original passed the undefined placeholder `_snake_case` as both keyword
    # values and both assertIsInstance arguments; restore concrete values.
    tokenizers = self.get_tokenizers(fast=False, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(F"""{tokenizer.__class__.__name__}"""):
            tokens = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
            string = tokenizer.convert_tokens_to_string(tokens)
            self.assertIsInstance(string, str)
def lowerCamelCase ( self : Union[str, Any]):
    """For each named special-token attribute, verify that assigning through
    the ``<attr>_id`` setter keeps the token view and the id view consistent,
    including clearing with ``None`` and the additional_special_tokens list."""
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(F"""{tokenizer.__class__.__name__}"""):
            attributes_list = [
                '''bos_token''',
                '''eos_token''',
                '''unk_token''',
                '''sep_token''',
                '''pad_token''',
                '''cls_token''',
                '''mask_token''',
            ]
            # Use id 0 and its token form as the round-trip probe value.
            token_id_to_test_setters = 0
            token_to_test_setters = tokenizer.convert_ids_to_tokens(
                token_id_to_test_setters, skip_special_tokens=False)
            for attr in attributes_list:
                # Clearing the id must clear both views.
                setattr(tokenizer, attr + '''_id''', None)
                self.assertEqual(getattr(tokenizer, attr), None)
                self.assertEqual(getattr(tokenizer, attr + '''_id'''), None)
                # Setting the id must update both views consistently.
                setattr(tokenizer, attr + '''_id''', token_id_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr + '''_id'''), token_id_to_test_setters)
            setattr(tokenizer, '''additional_special_tokens_ids''', [])
            self.assertListEqual(getattr(tokenizer, '''additional_special_tokens'''), [])
            self.assertListEqual(getattr(tokenizer, '''additional_special_tokens_ids'''), [])
            setattr(tokenizer, '''additional_special_tokens_ids''', [token_id_to_test_setters])
            self.assertListEqual(getattr(tokenizer, '''additional_special_tokens'''), [token_to_test_setters])
            self.assertListEqual(getattr(tokenizer, '''additional_special_tokens_ids'''), [token_id_to_test_setters])
| 51
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

# On-disk file names of a saved Funnel tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Every released Funnel Transformer checkpoint size; used to build the maps below.
# (Original code rebound one obfuscated name for all constants, so `_model_names`
# referenced at the bottom was undefined — restore distinct names.)
_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}

# All checkpoints share a 512-position context and lower-cased input text.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class SCREAMING_SNAKE_CASE( PreTrainedTokenizerFast ):
    r"""
    "Fast" Funnel Transformer tokenizer (backed by HuggingFace's *tokenizers*
    library), based on WordPiece. Mirrors the slow ``FunnelTokenizer`` API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel marks the leading [CLS] with its own segment id (2), unlike BERT (0).
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case: bool = True,
        unk_token: str = "<unk>",
        sep_token: str = "<sep>",
        pad_token: str = "<pad>",
        cls_token: str = "<cls>",
        mask_token: str = "<mask>",
        bos_token: str = "<s>",
        eos_token: str = "</s>",
        clean_text: bool = True,
        tokenize_chinese_chars: bool = True,
        strip_accents=None,
        wordpieces_prefix: str = "##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        # Re-configure the backend normalizer if the serialized state disagrees
        # with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Build model inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: ``2`` for [CLS], ``0`` for sequence A, ``1`` for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files into *save_directory*; return the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 23
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the MBart model family. Each optional backend
# (sentencepiece, tokenizers, torch, tf, flax) contributes its symbols to
# `_import_structure` only when installed; the module is then replaced with a
# `_LazyModule` proxy that imports submodules on first attribute access.
# (Original code rebound a single throwaway name instead of populating the
# dict, and never installed the proxy — `_import_structure` was undefined.)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the XGLM model family, mirroring the standard
# Transformers pattern. (Original code rebound one throwaway name instead of
# populating `_import_structure`, which was undefined at the final line.)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( ProcessorMixin ):
    r"""
    Processor that bundles a FLAVA image processor and a BERT tokenizer into a
    single callable object. (Original code used duplicate `_snake_case`
    parameter names — a SyntaxError — and an undefined base class.)
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''FlavaImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        # Accept the deprecated argument as a fallback only.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize ``text`` and/or preprocess ``images``; when both are given,
        merge the image features into the text encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Preserve order while de-duplicating the combined input names.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
| 51
| 0
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args) -> dict:
    """Turn a flat ``["--key", "value", ...]`` list (as returned by
    ``parser.parse_known_args``) into a ``{key: value}`` dict.

    Original code named the parameter `_lowercase` but read the undefined
    `unknown_args`; the call site below (`parse_unknown_args`) fixes the name.
    """
    return {key.lstrip("""-"""): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main() -> None:
    """Entry point of the ``datasets-cli`` tool: register each subcommand,
    parse the command line, and run the selected command.

    Restores the name `main` (called by the ``__main__`` guard) and replaces
    the undefined `__A` placeholders with the real locals.
    """
    parser = ArgumentParser(
        """HuggingFace Datasets CLI tool""", usage="""datasets-cli <command> [<args>]""", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="""datasets-cli command helpers""")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
# Script entry point: dispatch to the datasets-cli driver.
if __name__ == "__main__":
    main()
| 170
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __snake_case :
    """Empty placeholder class whose only purpose is to exercise the
    `require_onnxruntime` decorator — presumably a skip-marker smoke test
    (TODO confirm: no behavior is defined)."""
    pass
| 51
| 0
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

# Type variable ranging over the two concrete dataset classes; the functions
# below are annotated with it. (Original bound both the logger and this
# TypeVar to one rebinding name, leaving `DatasetType` undefined.)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several map-style or iterable datasets into one.

    All inputs must be of the same kind (all ``Dataset`` or all
    ``IterableDataset``). Raises ``ValueError`` on an empty list, mixed kinds,
    dataset dictionaries, or an unknown ``stopping_strategy``.
    """
    # Local imports avoid a circular dependency at module load time.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.'''
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first element fixes which kind every other element must match.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets of the same kind along ``axis``.

    Raises ``ValueError`` on an empty list, mixed kinds, or dataset
    dictionaries (same validation as ``interleave_datasets``).
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.'''
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 328
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# The pickle files reference classes from modules named `data_utils` /
# `vocabulary`, so alias the tokenizer classes and register both module names.
# (Original code bound these to a throwaway name, which has no effect.)
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TF checkpoint and/or pre-processed corpus to PyTorch.

    Original code declared four parameters all named `__A` (a SyntaxError);
    names and order are restored from the call at the bottom of the script.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, '''rb''') as fp:
            corpus = pickle.load(fp, encoding='''latin1''')
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''', None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(F"""Building PyTorch model from configuration: {config}""")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""")
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
            f.write(config.to_json_string())
# Command-line driver: collects the checkpoint/config/dataset paths and calls
# the conversion routine defined above.
if __name__ == "__main__":
    snake_case_ : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    snake_case_ : int = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 51
| 0
|
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied together
    before a single digit remains.

    >>> multiplicative_persistence(39)   # 39 -> 27 -> 14 -> 4
    3

    Raises:
        ValueError: if ``num`` is not an integer or is negative.
    """
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for digit in numbers:
            total *= digit
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed before a
    single digit remains.

    >>> additive_persistence(199)   # 199 -> 19 -> 10 -> 1
    3

    Raises:
        ValueError: if ``num`` is not an integer or is negative.
    """
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for digit in numbers:
            total += digit
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 162
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
# Number of bits per colour channel; the default for the bit conversions below.
# (Original bound 8 to a throwaway name, leaving `BITS` undefined.)
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor in [0, 1] to a {-1, 1} bit representation.

    Each 8-bit channel value is expanded into `bits` sign bits, packed along
    the channel axis: (b, c, h, w) -> (b, c*bits, h, w).
    """
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    # One power-of-two mask per bit position, broadcastable over (h, w).
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b c h w -> b c 1 h w''')
    bits_tensor = ((x & mask) != 0).float()
    bits_tensor = rearrange(bits_tensor, '''b c d h w -> b (c d) h w''')
    # Map {0, 1} to {-1, 1} as expected by the diffusion model.
    bits_tensor = bits_tensor * 2 - 1
    return bits_tensor
def bits_to_decimal(x, bits=BITS):
    """Convert a {-1, 1} bit tensor back to an image tensor in [0, 1].

    Inverse of ``decimal_to_bits``: sign bits along the packed channel axis
    are recombined into 8-bit channel values.
    """
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b (c d) h w -> b c d h w''', d=8)
    dec = reduce(x * mask, '''b c d h w -> b c h w''', '''sum''')
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step adapted for bit diffusion: identical to the stock scheduler
    except that "predicted x_0" is clipped to +/- ``self.bit_scale``.

    (Original declared every parameter as `__A` — a SyntaxError; names are
    restored to match ``DDIMScheduler.step``.)
    """
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0" to the bit range instead of [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else '''cpu'''
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type: str = "epsilon",
    generator: Optional[torch.Generator] = None,
    return_dict: bool = True,
) -> "Union[DDPMSchedulerOutput, Tuple]":
    """DDPM reverse step adapted for bit diffusion: clips "x_0" to +/- bit_scale.

    Args:
        model_output: direct output of the learned diffusion model.
        timestep: current discrete timestep ``t``.
        sample: current noisy sample ``x_t``.
        prediction_type: ``"epsilon"`` (model predicts the noise) or
            ``"sample"`` (model predicts x_0 directly).
        generator: optional RNG for the added noise.
        return_dict: if False, return a plain ``(prev_sample,)`` tuple.

    Raises:
        ValueError: for an unsupported ``prediction_type``.
    """
    t = timestep

    # Split off the learned-variance channels when the model predicts them.
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"""Unsupported prediction_type {prediction_type}.""")

    # 3. Clip "predicted x_0" to the analog-bit range instead of [-1, 1].
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise (only for t > 0; t == 0 is deterministic)
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class __snake_case ( a ):
    """Bit-diffusion pipeline: denoises random analog-bit latents with a UNet
    and converts the final bits back to decimal pixel values.

    NOTE(review): this block is machine-mangled — the base class is the name
    ``a`` (presumably ``DiffusionPipeline``) and ``__init__``'s three
    parameters all share the name ``_snake_case`` (a SyntaxError); upstream
    they are ``unet``, ``scheduler`` and ``bit_scale``. Confirm against the
    original community pipeline before running.
    """

    def __init__( self : Union[str, Any] , _snake_case : UNetaDConditionModel , _snake_case : Union[DDIMScheduler, DDPMScheduler] , _snake_case : Optional[float] = 1.0 , ):
        """Register the UNet/scheduler pair and monkey-patch the scheduler's
        ``step`` with the bit-scale-aware variant matching the scheduler type."""
        super().__init__()
        UpperCAmelCase_ = bit_scale
        # DDIM and DDPM need different step functions that clip x_0 to +/- bit_scale.
        UpperCAmelCase_ = (
            ddim_bit_scheduler_step if isinstance(_snake_case , _snake_case) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=_snake_case , scheduler=_snake_case)

    @torch.no_grad()
    def __call__( self : Union[str, Any] , _snake_case : Optional[int] = 256 , _snake_case : Optional[int] = 256 , _snake_case : Optional[int] = 50 , _snake_case : Optional[torch.Generator] = None , _snake_case : Optional[int] = 1 , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , **_snake_case : Optional[Any] , ):
        """Sample images: start from random bit-space latents, denoise over the
        scheduler's timesteps, then decode the analog bits back to decimals."""
        UpperCAmelCase_ = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=_snake_case , )
        # Quantise the random latents to analog bits and apply the bit scale.
        UpperCAmelCase_ = decimal_to_bits(_snake_case) * self.bit_scale
        UpperCAmelCase_ = latents.to(self.device)
        self.scheduler.set_timesteps(_snake_case)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            UpperCAmelCase_ = self.unet(_snake_case , _snake_case).sample
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
        # Decode the final analog bits back to decimal image values.
        UpperCAmelCase_ = bits_to_decimal(_snake_case)
        if output_type == "pil":
            UpperCAmelCase_ = self.numpy_to_pil(_snake_case)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_snake_case)
| 51
| 0
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowercase ( unittest.TestCase ):
    """Tests for ``TextStreamer`` / ``TextIteratorStreamer`` against greedy generation.

    NOTE(review): every test method below carries the mangled name
    ``UpperCAmelCase__`` — later defs shadow earlier ones and none match
    unittest's ``test_*`` discovery pattern. Upstream these are five distinct
    ``test_...`` methods; confirm before relying on this suite running.
    """

    def UpperCAmelCase__ (self ):
        """Streaming to stdout must reproduce the greedy-decoded text
        (minus the streamer's trailing newline)."""
        lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Optional[int] = -1
        lowerCamelCase_ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : str = model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case )
        lowerCamelCase_ : Any = tokenizer.decode(greedy_ids[0] )

        with CaptureStdout() as cs:
            lowerCamelCase_ : Union[str, Any] = TextStreamer(_snake_case )
            model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case , streamer=_snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase_ : Union[str, Any] = cs.out[:-1]

        self.assertEqual(_snake_case , _snake_case )

    def UpperCAmelCase__ (self ):
        """Iterating a TextIteratorStreamer from a background generate() thread
        must accumulate the same greedy text."""
        lowerCamelCase_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Union[str, Any] = -1
        lowerCamelCase_ : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : Dict = model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case )
        lowerCamelCase_ : Union[str, Any] = tokenizer.decode(greedy_ids[0] )

        lowerCamelCase_ : List[Any] = TextIteratorStreamer(_snake_case )
        lowerCamelCase_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase_ : Optional[Any] = Thread(target=model.generate , kwargs=_snake_case )
        thread.start()
        lowerCamelCase_ : Optional[Any] = ''''''
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(_snake_case , _snake_case )

    def UpperCAmelCase__ (self ):
        """With skip_prompt=True, only the newly generated continuation is
        streamed (the prompt tokens are suppressed)."""
        lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Optional[Any] = -1
        lowerCamelCase_ : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : List[str] = model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case )
        # Drop the prompt ids: only the continuation should appear on stdout.
        lowerCamelCase_ : Dict = greedy_ids[:, input_ids.shape[1] :]
        lowerCamelCase_ : int = tokenizer.decode(new_greedy_ids[0] )

        with CaptureStdout() as cs:
            lowerCamelCase_ : int = TextStreamer(_snake_case , skip_prompt=_snake_case )
            model.generate(_snake_case , max_new_tokens=1_0 , do_sample=_snake_case , streamer=_snake_case )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        lowerCamelCase_ : List[Any] = cs.out[:-1]

        self.assertEqual(_snake_case , _snake_case )

    def UpperCAmelCase__ (self ):
        """With skip_special_tokens=True a BOS-only prompt must not be
        streamed; the streamed text re-tokenizes to exactly one new token."""
        lowerCamelCase_ : Any = AutoTokenizer.from_pretrained('''distilgpt2''' )
        lowerCamelCase_ : Dict = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_snake_case )
        lowerCamelCase_ : Any = -1
        lowerCamelCase_ : Optional[Any] = torch.ones((1, 5) , device=_snake_case ).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            lowerCamelCase_ : Dict = TextStreamer(_snake_case , skip_special_tokens=_snake_case )
            model.generate(_snake_case , max_new_tokens=1 , do_sample=_snake_case , streamer=_snake_case )

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        lowerCamelCase_ : Union[str, Any] = cs.out[:-1]  # Remove the final "\n"
        lowerCamelCase_ : str = tokenizer(_snake_case , return_tensors='''pt''' )
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )

    def UpperCAmelCase__ (self ):
        """A very small TextIteratorStreamer timeout must raise once the
        background generation cannot keep up."""
        lowerCamelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        lowerCamelCase_ : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_snake_case )
        lowerCamelCase_ : Union[str, Any] = -1
        lowerCamelCase_ : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case )
        lowerCamelCase_ : Tuple = TextIteratorStreamer(_snake_case , timeout=0.0_01 )
        lowerCamelCase_ : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        lowerCamelCase_ : int = Thread(target=model.generate , kwargs=_snake_case )
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(_snake_case ):
            lowerCamelCase_ : Optional[Any] = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 318
|
# Mapping of pip package name -> pinned requirement specifier. This is the
# generated dependency table mirroring setup.py's install/dev requirements;
# edit the specifiers there, not here, and regenerate.
snake_case_ : Dict = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 51
| 0
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

# 0 marks a free cell, 1 an obstacle.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    """A grid search node; nodes order by total cost f = g + h."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are stored (y, x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic distance to the goal: manhattan if HEURISTIC == 1, else euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    """Single-direction A* search over the module-level ``grid``."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # start/goal are given as (y, x); Node takes x before y.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path start -> goal, or just [start] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (lowest f-cost first)
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, non-obstacle neighbours of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back from *node* and return the path start-first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """Bidirectional A*: two AStar searches advancing towards each other."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate forward/backward expansions until the two frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search now aims at the other search's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path to the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 310
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Return the raw bytes of the video behind an Instagram/IGTV post URL.

    The downloadgram.net API is queried first to resolve the post URL to a
    direct video source, which is then fetched. Network I/O only; no
    validation is performed on ``url``.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # First request resolves the post URL to the direct video source URL.
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    # Timestamped output name, e.g. 2023-01-31_12:34:56.mp4
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 51
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __lowerCamelCase ( A__ ):
    """Task template for automatic-speech-recognition datasets.

    Maps a dataset's audio column to ``"audio"`` and its transcription column
    to ``"transcription"``.

    NOTE(review): this block is machine-mangled — the decorator argument and
    base class are both the name ``A__`` (presumably ``True`` and
    ``TaskTemplate``), and all five class fields share the name ``a_``
    (upstream: ``task``, ``input_schema``, ``label_schema``, ``audio_column``,
    ``transcription_column``) so only the last binding survives. Confirm
    against the upstream datasets task template.
    """

    # ``task`` is serialized even when it still has its default value.
    a_ : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    a_ : ClassVar[Features] = Features({"""audio""": Audio()} )
    a_ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    a_ : str = "audio"
    a_ : str = "transcription"

    def lowerCamelCase ( self : Union[str, Any] , a_ : Tuple ):
        """Return a copy of this template whose input schema uses the dataset's
        own Audio feature for the audio column.

        Raises ``ValueError`` when the audio column is absent or not an Audio
        type.  NOTE(review): the body reads ``features`` while the parameter is
        named ``a_`` — mangled; upstream the parameter is ``features``.
        """
        if self.audio_column not in features:
            raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] , _snake_case ):
            raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
        # Copy so the frozen template itself is never mutated.
        lowerCAmelCase_ : int = copy.deepcopy(self )
        lowerCAmelCase_ : List[Any] = self.input_schema.copy()
        lowerCAmelCase_ : Dict = features[self.audio_column]
        lowerCAmelCase_ : List[Any] = input_schema
        return task_template

    @property
    def lowerCamelCase ( self : Optional[int] ):
        """Mapping of dataset column name -> canonical task column name."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 241
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger plus the map of released Falcon checkpoints to their hosted
# config files.
# NOTE(review): both assignments use the mangled name ``snake_case_`` — the
# second clobbers the first (upstream: ``logger`` and
# ``FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP``); confirm against upstream.
snake_case_ : Any = logging.get_logger(__name__)

snake_case_ : Optional[Any] = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __snake_case(PretrainedConfig):
    """Configuration for Falcon models (e.g. tiiuae/falcon-7b / falcon-40b).

    Restored from a machine-mangled block: the original had duplicate
    ``_snake_case`` parameter names (a SyntaxError), unbound ``self``
    attributes, an undefined base class name, and two properties sharing one
    name. Defaults below are the values the mangled signature carried.
    """

    model_type = "falcon"
    # past_key_values are cache state, not config, so they are ignored at inference.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Fall back to one KV head per attention head when not given explicitly.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Per-head hidden dimension."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Rotary embeddings are used exactly when alibi is disabled."""
        return not self.alibi
| 51
| 0
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0B10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class __A:
    """Embeds the fixed SDXL watermark bit pattern into generated images."""

    def __init__(self) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def a__(self, images: torch.FloatTensor) -> torch.FloatTensor:
        """Watermark a batch of images in [-1, 1].

        Images narrower than 256px are returned unchanged — the watermarking
        library cannot encode them.
        """
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # [-1, 1] float NCHW -> [0, 255] HWC numpy for the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # back to torch NCHW in [-1, 1]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
| 211
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

# 0 are free path whereas 1's are obstacles
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    """One grid cell in the search; nodes compare by total cost f = g + h."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are (y, x) tuples
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, euclidean otherwise."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    """Plain A* search over the module-level ``grid``."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # (y, x) inputs; Node wants x first.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand lowest-f nodes until the goal is reached; return the path
        (or just [start] when no path exists)."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Neighbours of *parent* that are inside the grid and not obstacles."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Follow parent links from *node* back to the start, returned start-first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """Two cooperating A* searches, one from each endpoint."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand both frontiers in lockstep until they meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Retarget each search at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Concatenate the forward path with the reversed backward path."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # the meeting node appears in both halves
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 51
| 0
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
# Tokenizer/model checkpoints and tiny fixtures shared by the dataset tests
# below (the class bodies reference these names directly).
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
# Paired article/summary fixtures written into the temporary data dirs.
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
# Tiny random checkpoints keep the parameterized tests fast.
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _snake_case ( lowerCAmelCase : Path , lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = "\n".join(__A )
Path(__A ).open("w" ).writelines(__A )
def make_test_data_dir(tmp_dir: str) -> str:
    """Populate *tmp_dir* with train/val/test source+target fixture files.

    Each split gets the module-level ARTICLES as ``<split>.source`` and
    SUMMARIES as ``<split>.target``. Returns *tmp_dir* for chaining.
    """
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f'{split}.source'), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f'{split}.target'), SUMMARIES)
    return tmp_dir
class a__ ( A__ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
],)
@slow
def __UpperCamelCase ( self : List[str],_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained(_snake_case )
SCREAMING_SNAKE_CASE_ : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE_ : Any = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE_ : str = 4
SCREAMING_SNAKE_CASE_ : List[str] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
SCREAMING_SNAKE_CASE_ : Optional[Any] = SeqaSeqDataset(
_snake_case,data_dir=_snake_case,type_path="train",max_source_length=_snake_case,max_target_length=_snake_case,src_lang=_snake_case,tgt_lang=_snake_case,)
SCREAMING_SNAKE_CASE_ : str = DataLoader(_snake_case,batch_size=2,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_snake_case,_snake_case )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
SCREAMING_SNAKE_CASE_ : Optional[int] = shift_tokens_right(batch["labels"],tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCamelCase ( self : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = AutoTokenizer.from_pretrained(_snake_case )
SCREAMING_SNAKE_CASE_ : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE_ : List[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE_ : List[str] = 4
SCREAMING_SNAKE_CASE_ : Tuple = LegacySeqaSeqDataset(
_snake_case,data_dir=_snake_case,type_path="train",max_source_length=20,max_target_length=_snake_case,)
SCREAMING_SNAKE_CASE_ : List[Any] = DataLoader(_snake_case,batch_size=2,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
SCREAMING_SNAKE_CASE_ : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
SCREAMING_SNAKE_CASE_ : Dict = tmp_dir.joinpath("train.source" ).open().readlines()
SCREAMING_SNAKE_CASE_ : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_snake_case,_snake_case,128,_snake_case )
SCREAMING_SNAKE_CASE_ : Dict = {x.name for x in tmp_dir.iterdir()}
SCREAMING_SNAKE_CASE_ : Tuple = {x.name for x in save_dir.iterdir()}
SCREAMING_SNAKE_CASE_ : List[Any] = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_snake_case ) < len(_snake_case )
assert len(_snake_case ) == 1
assert len(packed_examples[0] ) == sum(len(_snake_case ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE,reason="This test requires fairseq" )
def __UpperCamelCase ( self : List[Any] ):
    """Dynamic batch sampler: batches vary in size, respect the token budget, drop nothing."""
    # NOTE(review): assignment targets below look mangled — later lines read `ds`,
    # `batch_sampler`, `required_batch_size_multiple`, `num_src_per_batch`, `failures`,
    # `src_shape`, `bs`, `num_src_tokens`, `max_tokens`; confirm the intended bindings.
    if not FAIRSEQ_AVAILABLE:
        return
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self._get_dataset(max_len=64 )
    SCREAMING_SNAKE_CASE_ : Dict = 64
    SCREAMING_SNAKE_CASE_ : Optional[Any] = ds.make_dynamic_sampler(_snake_case,required_batch_size_multiple=_snake_case )
    SCREAMING_SNAKE_CASE_ : Optional[Any] = [len(_snake_case ) for x in batch_sampler]
    assert len(set(_snake_case ) ) > 1  # it's not dynamic batch size if every batch is the same length
    assert sum(_snake_case ) == len(_snake_case )  # no dropped or added examples
    SCREAMING_SNAKE_CASE_ : Optional[Any] = DataLoader(_snake_case,batch_sampler=_snake_case,collate_fn=ds.collate_fn,num_workers=2 )
    SCREAMING_SNAKE_CASE_ : Tuple = []
    SCREAMING_SNAKE_CASE_ : str = []
    for batch in data_loader:
        SCREAMING_SNAKE_CASE_ : Optional[int] = batch["input_ids"].shape
        SCREAMING_SNAKE_CASE_ : List[str] = src_shape[0]
        # batch size must be a multiple of the requirement unless it is the final remainder
        assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
        SCREAMING_SNAKE_CASE_ : int = np.product(batch["input_ids"].shape )
        num_src_per_batch.append(_snake_case )
        if num_src_tokens > (max_tokens * 1.1):  # allow 10% slack over the budget
            failures.append(_snake_case )
    assert num_src_per_batch[0] == max(_snake_case )  # largest batch comes first
    if failures:
        raise AssertionError(F'too many tokens in {len(_snake_case )} batches' )
def __UpperCamelCase ( self : int ):
    """Sortish sampler should reduce total padding versus the default order, without dropping examples."""
    # NOTE(review): assignment targets look mangled — later lines read `ds`, `tokenizer`,
    # and the two DataLoaders are compared against each other; confirm intended bindings.
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_dataset(max_len=512 )
    SCREAMING_SNAKE_CASE_ : List[Any] = 2
    SCREAMING_SNAKE_CASE_ : int = ds.make_sortish_sampler(_snake_case,shuffle=_snake_case )
    SCREAMING_SNAKE_CASE_ : List[str] = DataLoader(_snake_case,batch_size=_snake_case,collate_fn=ds.collate_fn,num_workers=2 )
    SCREAMING_SNAKE_CASE_ : List[Any] = DataLoader(_snake_case,batch_size=_snake_case,collate_fn=ds.collate_fn,num_workers=2,sampler=_snake_case )
    SCREAMING_SNAKE_CASE_ : Any = tokenizer.pad_token_id

    def count_pad_tokens(_A : List[Any],_A : Tuple="input_ids" ):
        # total pad tokens per batch for the given tensor key
        return [batch[k].eq(_snake_case ).sum().item() for batch in data_loader]

    assert sum(count_pad_tokens(_snake_case,k="labels" ) ) < sum(count_pad_tokens(_snake_case,k="labels" ) )
    assert sum(count_pad_tokens(_snake_case ) ) < sum(count_pad_tokens(_snake_case ) )
    assert len(_snake_case ) == len(_snake_case )  # same number of batches either way
def __UpperCamelCase ( self : Optional[Any],_A : Dict=1000,_A : str=128 ):
    """Build a SeqaSeqDataset: real WMT en-ro data when USE_REAL_DATA is set, tiny fixture otherwise.

    Returns (ds, max_tokens, tokenizer).
    """
    # NOTE(review): assignment targets look mangled — later lines read `data_dir`,
    # `max_tokens`, `tokenizer`, `ds`, and the parameters are presumably (n_obs, max_len);
    # confirm against the upstream helper.
    if os.getenv("USE_REAL_DATA",_snake_case ):
        SCREAMING_SNAKE_CASE_ : int = "examples/seq2seq/wmt_en_ro"
        SCREAMING_SNAKE_CASE_ : Optional[int] = max_len * 2 * 64
        # only (re)compute the length cache when it is missing
        if not Path(_snake_case ).joinpath("train.len" ).exists():
            save_len_file(_snake_case,_snake_case )
    else:
        SCREAMING_SNAKE_CASE_ : Any = "examples/seq2seq/test_data/wmt_en_ro"
        SCREAMING_SNAKE_CASE_ : int = max_len * 4
        save_len_file(_snake_case,_snake_case )
    SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(_snake_case )
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = SeqaSeqDataset(
        _snake_case,data_dir=_snake_case,type_path="train",max_source_length=_snake_case,max_target_length=_snake_case,n_obs=_snake_case,)
    return ds, max_tokens, tokenizer
def __UpperCamelCase ( self : List[str] ):
    """Distributed sortish samplers for different ranks must yield disjoint index sets."""
    # NOTE(review): the two sampler index sets are bound to mangled names; the final
    # assert reads `idsa` — confirm the intended bindings.
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self._get_dataset()
    SCREAMING_SNAKE_CASE_ : Optional[int] = set(DistributedSortishSampler(_snake_case,256,num_replicas=2,rank=0,add_extra_examples=_snake_case ) )
    SCREAMING_SNAKE_CASE_ : Dict = set(DistributedSortishSampler(_snake_case,256,num_replicas=2,rank=1,add_extra_examples=_snake_case ) )
    assert idsa.intersection(_snake_case ) == set()
@parameterized.expand(
    [
        MBART_TINY,
        MARIAN_TINY,
        T5_TINY,
        BART_TINY,
        PEGASUS_XSUM,
    ],)
def __UpperCamelCase ( self : List[str],_A : List[str] ):
    """dataset_kwargs should carry src/tgt language codes for mBART and add_prefix_space only for BART."""
    # NOTE(review): assignment targets look mangled — later lines read `tok_name`,
    # `train_dataset`, `kwargs`; confirm the intended bindings against the upstream test.
    SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained(_snake_case,use_fast=_snake_case )
    if tok_name == MBART_TINY:
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = SeqaSeqDataset(
            _snake_case,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ),type_path="train",max_source_length=4,max_target_length=8,src_lang="EN",tgt_lang="FR",)
        SCREAMING_SNAKE_CASE_ : int = train_dataset.dataset_kwargs
        assert "src_lang" in kwargs and "tgt_lang" in kwargs
    else:
        SCREAMING_SNAKE_CASE_ : Dict = SeqaSeqDataset(
            _snake_case,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ),type_path="train",max_source_length=4,max_target_length=8,)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = train_dataset.dataset_kwargs
        assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
        assert len(_snake_case ) == 1 if tok_name == BART_TINY else len(_snake_case ) == 0
| 18
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
    """Builds tiny MaskFormer configs/inputs and runs forward-pass checks for the unit tests.

    Fixes applied: every method was defined under the same mangled name ``lowerCamelCase``
    (each definition shadowed the previous one) while both this class's own bodies and the
    test class below call the real names (``self.get_config()``,
    ``self.model_tester.prepare_config_and_inputs()`` …); ``__init__`` and the input
    builders also dropped their assignment targets, so ``self.batch_size`` etc. and the
    returned ``config, pixel_values, …`` names were never bound (NameError).
    """

    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs( self ):
        """Return (config, pixel_values, pixel_mask, mask_labels, class_labels) random inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device)
        # random binary masks / labels: threshold uniform noise at 0.5
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config( self ):
        """Tiny Swin backbone + tiny DETR decoder MaskFormer config."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )

    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state( self , output , config ):
        """Hidden-state tuples must match backbone depth / decoder layer counts."""
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states) , config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask)
            output = model(pixel_values , output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output , config)

    def create_and_check_maskformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class __snake_case ( a , a , unittest.TestCase ):
    """Unit tests for MaskFormerModel / MaskFormerForInstanceSegmentation.

    NOTE(review): the base classes are written as ``a , a`` (presumably ModelTesterMixin,
    PipelineTesterMixin — both imported above), every test method is named ``lowerCamelCase``
    (later defs shadow earlier ones), and many assignment targets (``UpperCAmelCase_``) and
    call arguments (``_snake_case``) look mangled while following lines read the intended
    names. Confirm all of these against the upstream test module before relying on this file.
    """

    UpperCAmelCase__ : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    UpperCAmelCase__ : Optional[Any] = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # Common-test capability flags (pruning, head masking, etc. unsupported).
    UpperCAmelCase__ : Dict = False
    UpperCAmelCase__ : List[str] = False
    UpperCAmelCase__ : Optional[Any] = False
    UpperCAmelCase__ : Union[str, Any] = False

    def lowerCamelCase ( self : List[Any]):
        """Create the shared model tester and config tester."""
        UpperCAmelCase_ = MaskFormerModelTester(self)
        UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)

    def lowerCamelCase ( self : List[Any]):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCamelCase ( self : str):
        """Forward pass of the base model, also requesting hidden states."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case)

    def lowerCamelCase ( self : str):
        """Forward pass of the instance-segmentation head, including loss."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_snake_case)

    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''')
    def lowerCamelCase ( self : Dict):
        """Skipped: no inputs_embeds."""
        pass

    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''')
    def lowerCamelCase ( self : int):
        """Skipped: no input embeddings accessor."""
        pass

    @unittest.skip(reason='''MaskFormer is not a generative model''')
    def lowerCamelCase ( self : str):
        """Skipped: not generative."""
        pass

    @unittest.skip(reason='''MaskFormer does not use token embeddings''')
    def lowerCamelCase ( self : int):
        """Skipped: no token embeddings."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def lowerCamelCase ( self : Any):
        """Skipped: incompatible with nn.DataParallel."""
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def lowerCamelCase ( self : str):
        """Skipped pending a smaller common-test model."""
        pass

    def lowerCamelCase ( self : List[str]):
        """The forward signature should start with ``pixel_values``."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(_snake_case)
            UpperCAmelCase_ = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ = [*signature.parameters.keys()]
            UpperCAmelCase_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _snake_case)

    @slow
    def lowerCamelCase ( self : Union[str, Any]):
        """Pretrained checkpoints should load without error."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            UpperCAmelCase_ = MaskFormerModel.from_pretrained(_snake_case)
            self.assertIsNotNone(_snake_case)

    def lowerCamelCase ( self : str):
        """A freshly-initialized head should still return a loss on random labels."""
        UpperCAmelCase_ = (self.model_tester.min_size,) * 2
        UpperCAmelCase_ = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=_snake_case),
            '''mask_labels''': torch.randn((2, 10, *size) , device=_snake_case),
            '''class_labels''': torch.zeros(2 , 10 , device=_snake_case).long(),
        }
        UpperCAmelCase_ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(_snake_case)
        UpperCAmelCase_ = model(**_snake_case)
        self.assertTrue(outputs.loss is not None)

    def lowerCamelCase ( self : Optional[Any]):
        """Base-model forward pass with hidden states (duplicate of the earlier check)."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case)

    def lowerCamelCase ( self : str):
        """Attention tensors should be returned when requested."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(_snake_case).to(_snake_case)
            UpperCAmelCase_ = model(**_snake_case , output_attentions=_snake_case)
            self.assertTrue(outputs.attentions is not None)

    def lowerCamelCase ( self : int):
        """Loss from the segmentation head should be backpropagatable in train mode."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        UpperCAmelCase_ = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        UpperCAmelCase_ = model_class(_snake_case)
        model.to(_snake_case)
        model.train()
        UpperCAmelCase_ = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case).loss
        loss.backward()

    def lowerCamelCase ( self : Optional[int]):
        """Intermediate hidden-state and attention tensors should retain gradients."""
        UpperCAmelCase_ = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        UpperCAmelCase_ = True
        UpperCAmelCase_ = True
        UpperCAmelCase_ = model_class(_snake_case)
        model.to(_snake_case)
        model.train()
        UpperCAmelCase_ = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case)
        UpperCAmelCase_ = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        UpperCAmelCase_ = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_snake_case)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
snake_case_ : Dict = 1e-4
def A () -> Union[str, Any]:
    """Load the COCO cats fixture image used by the integration tests below.

    Fix: the opened image was bound to a stray name while ``return image`` raised
    NameError; the result is now actually bound to ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
    """Slow integration tests against pretrained MaskFormer checkpoints.

    NOTE(review): many assignment targets (``UpperCAmelCase_``) and call arguments
    (``_snake_case`` — e.g. the device passed to ``.to(...)`` and the ``atol=`` values,
    presumably the module-level 1e-4 tolerance) look like mangled identifiers; confirm
    the intended bindings against the upstream test module.
    """

    @cached_property
    def lowerCamelCase ( self : List[str]):
        """Image processor for the swin-small COCO checkpoint (None if vision deps missing)."""
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
            if is_vision_available()
            else None
        )

    def lowerCamelCase ( self : List[Any]):
        """Base-model inference: compare encoder / pixel decoder / transformer decoder slices."""
        UpperCAmelCase_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(_snake_case)
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        UpperCAmelCase_ = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        UpperCAmelCase_ = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        UpperCAmelCase_ = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _snake_case , atol=_snake_case))

    def lowerCamelCase ( self : List[str]):
        """Segmentation-head inference (swin-small COCO): masks and class logits slices."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        # masks_queries_logits
        UpperCAmelCase_ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCAmelCase_ = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # class_queries_logits
        UpperCAmelCase_ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        UpperCAmelCase_ = torch.tensor(
            [
                [1.65_12e00, -5.25_72e00, -3.35_19e00],
                [3.61_69e-02, -5.90_25e00, -2.93_13e00],
                [1.07_66e-04, -7.76_30e00, -5.12_63e00],
            ]).to(_snake_case)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))

    def lowerCamelCase ( self : Optional[Any]):
        """Segmentation-head inference (resnet101 COCO-stuff): masks and class logits slices."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        # masks_queries_logits
        UpperCAmelCase_ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCAmelCase_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # class_queries_logits
        UpperCAmelCase_ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        UpperCAmelCase_ = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]).to(_snake_case)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))

    def lowerCamelCase ( self : Tuple):
        """Batched inference with segmentation maps should produce a loss."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='''pt''' , )
        UpperCAmelCase_ = inputs['''pixel_values'''].to(_snake_case)
        UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''mask_labels''']]
        UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''class_labels''']]
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        self.assertTrue(outputs.loss is not None)
| 51
| 0
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCAmelCase = logging.get_logger(__name__)
# Registries used by `Dataset.set_format`: canonical format type -> Formatter class,
# alias -> canonical type, and alias -> error raised when a backend is unavailable.
# NOTE(review): all four globals share the mangled name __UpperCAmelCase here, while later
# code reads logger / _FORMAT_TYPES / _FORMAT_TYPES_ALIASES / _FORMAT_TYPES_ALIASES_UNAVAILABLE
# — confirm the intended names against the upstream module.
__UpperCAmelCase = {}
__UpperCAmelCase = {}
__UpperCAmelCase = {}
def _snake_case ( formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ) -> None:
    """Register ``formatter_cls`` as the formatter for ``format_type`` and all of its aliases.

    Overwrites (with a warning) any previously registered formatter or alias.

    Fixes applied: the three parameters were all named ``lowercase__`` (a SyntaxError) while
    the body read ``formatter_cls``/``format_type``/``aliases``, and the two dict-subscript
    assignments had lost their targets, so the registries were never populated.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _snake_case ( unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ) -> None:
    """Register ``unavailable_error`` to be raised when ``format_type`` (or any alias) is
    requested but its backend library is not installed.

    Fixes applied: the parameters were all named ``lowercase__`` (a SyntaxError) and the
    subscript assignment target was lost, so the registry was never populated.
    """
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
# NOTE(review): `_register_formatter` / `_register_unavailable_formatter` are not defined under
# those names in this file (both helpers above are mangled to `_snake_case`), and the error
# objects below are bound to `__UpperCAmelCase` while `_torch_error` / `_tf_error` / `_jax_error`
# are read — confirm the intended names against the upstream module.
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
# Tensor-backed formatters are optional: register the formatter when the backend is importable,
# otherwise register the error to raise when that format is requested.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    __UpperCAmelCase = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    __UpperCAmelCase = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    __UpperCAmelCase = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def _snake_case ( format_type: Optional[str] ) -> Optional[str]:
    """Resolve ``format_type`` through the alias table, returning the canonical format name.

    Fix applied: the parameter was named ``lowercase__`` while the body read ``format_type``
    (NameError on every call).
    """
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def _snake_case ( format_type: Optional[str] , **format_kwargs ) -> Formatter:
    """Instantiate the Formatter registered for ``format_type`` (alias-aware).

    Raises the registered unavailability error for known-but-uninstalled backends, and
    ValueError for unknown format types.

    Fixes applied: both parameters were named ``lowercase__`` (a SyntaxError) and the body
    read the undefined name ``__A``.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" )
| 84
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def A (torch_layer , weight , bias=None ):
    """Copy ``weight`` (and optionally ``bias``) into ``torch_layer`` as trainable Parameters.

    Shapes must already match the layer exactly; asserts otherwise.

    Fixes applied: the three parameters were all named ``__A`` (a SyntaxError) and the
    ``nn.Parameter`` assignments had lost their ``torch_layer.weight`` / ``torch_layer.bias``
    targets, so nothing was ever copied.
    """
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
def A (weights , torch_layer , hidden_size ):
    """Load LSH self-attention weights (query_key, value, output dense) into ``torch_layer``.

    Attention weights are transposed on dims (1, 2) and flattened to (-1, hidden_size) to
    match the PyTorch linear-layer layout; the output dense is reshaped then transposed.

    Fixes applied: the three parameters were all named ``__A`` (a SyntaxError) and the body
    read the undefined ``__A`` for the hidden-size reshapes.
    """
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def A (weights , torch_layer , hidden_size ):
    """Load local self-attention weights (query, key, value, output dense) into ``torch_layer``.

    Same reshaping scheme as the LSH variant, but with separate query and key projections.

    Fixes applied: the three parameters were all named ``__A`` (a SyntaxError) and the body
    read the undefined ``__A`` for the hidden-size reshapes.
    """
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def A (weights , torch_block , hidden_size ):
    """Load one Reformer block's weights (attention layer norm, attention, feed forward).

    Attention weights with fewer than 4 tensors are LSH attention (shared query_key);
    otherwise local attention (separate query and key).

    Fixes applied: the three parameters were all named ``__A`` (a SyntaxError) and most
    assignment targets / ``__A`` reads had been mangled away.
    """
    # attention layer norm
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0] )
    layer_norm_a_bias = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_a_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_a_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def A (weights , torch_model , hidden_size ):
    """Load a full Trax Reformer checkpoint into ``torch_model`` (embeddings, blocks, LM head).

    Trax packs four weight groups per encoder layer, hence the ``* 4`` layer-count check
    and the 4-wide slices per block.

    Fixes applied: the three parameters were all named ``__A`` (a SyntaxError), the
    ``isinstance`` target was mangled (restored to ``tuple`` — multiple axial position
    embeddings arrive as a tuple), and assignment targets had been lost.
    """
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def A (trax_model_pkl_path , config_file , pytorch_dump_path ):
    """Convert a pickled Trax Reformer checkpoint to a PyTorch state dict and save it.

    Parameter order matches the CLI call site below
    (``--trax_model_pkl_path``, ``--config_file``, ``--pytorch_dump_path``).

    Fixes applied: the three parameters were all named ``__A`` (a SyntaxError) and the
    body read the undefined ``__A`` for every path.

    Security NOTE(review): ``pickle.load`` executes arbitrary code from the checkpoint
    file — only run this on trusted checkpoints.
    """
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , '''rb''' ) as f:
        model_weights = pickle.load(f )['''weights''']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    # CLI entry point: convert a pickled Trax Reformer checkpoint into a PyTorch state dict.
    # NOTE(review): the parser and parsed args are bound to mangled names (snake_case_)
    # while `parser.add_argument(...)` and `args.*` are read below — confirm the intended
    # bindings against the upstream conversion script.
    snake_case_ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    snake_case_ : List[Any] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 51
| 0
|
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE( A__ ):
    """Dataset reader that loads plain-text files (one example per line) via the Text builder."""

    # NOTE(review): the original declared every __init__ parameter as
    # ``__snake_case`` (duplicate argument names are a SyntaxError) and the
    # method bodies referenced undefined names such as ``_snake_case`` and
    # ``dataset``.  Names were reconstructed from the body and from the
    # AbstractDatasetReader interface.
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        # Normalise a bare path (or list of paths) into a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )

    def A ( self ):
        """Build and return the dataset, either streaming or fully materialised."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 23
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __snake_case ( a , a , a , unittest.TestCase ):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline with a single tiny ControlNet."""

    # NOTE(review): every class attribute below is assigned to the same name
    # ``UpperCAmelCase__`` (each assignment overwrites the previous one); in the
    # upstream suite these were distinct attributes such as ``pipeline_class``,
    # ``params``, ``batch_params`` and ``image_params``.
    UpperCAmelCase__ : List[Any] = StableDiffusionControlNetImgaImgPipeline
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCAmelCase__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
    UpperCAmelCase__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def lowerCamelCase ( self : int):
        """Build the dict of tiny dummy components (unet, controlnet, scheduler, vae, text encoder)."""
        # NOTE(review): every local below is bound to ``UpperCAmelCase_`` while
        # the returned dict references ``unet``, ``controlnet``, ``scheduler``,
        # ``vae``, ``text_encoder`` and ``tokenizer``, which are never bound —
        # this method raises NameError as written.
        torch.manual_seed(0)
        UpperCAmelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0)
        UpperCAmelCase_ = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
        torch.manual_seed(0)
        UpperCAmelCase_ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        UpperCAmelCase_ = CLIPTextModel(_snake_case)
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        UpperCAmelCase_ = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any , _snake_case : Dict=0):
        """Build deterministic pipeline inputs (prompt, generator, init image, control image)."""
        # NOTE(review): same garbling as above — locals are bound to
        # ``UpperCAmelCase_`` but the code references ``generator``, ``image``
        # and ``control_image``.
        if str(_snake_case).startswith('''mps'''):
            UpperCAmelCase_ = torch.manual_seed(_snake_case)
        else:
            UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , )
        UpperCAmelCase_ = floats_tensor(control_image.shape , rng=random.Random(_snake_case)).to(_snake_case)
        UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64))
        UpperCAmelCase_ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def lowerCamelCase ( self : Any):
        """Check attention slicing produces (near-)identical outputs."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCamelCase ( self : Any):
        """Check xFormers attention produces (near-)identical outputs (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def lowerCamelCase ( self : Optional[Any]):
        """Check a single-sample batch matches the unbatched result."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __snake_case ( a , a , unittest.TestCase ):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline with a MultiControlNet (two ControlNets)."""

    # NOTE(review): as in the single-ControlNet class, all class attributes are
    # assigned to the same name ``UpperCAmelCase__`` and method locals to
    # ``UpperCAmelCase_`` while later code references the original identifiers
    # (``unet``, ``controlneta``, ``generator`` ...) — these methods raise
    # NameError as written.
    UpperCAmelCase__ : str = StableDiffusionControlNetImgaImgPipeline
    UpperCAmelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCAmelCase__ : str = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def lowerCamelCase ( self : str):
        """Build tiny dummy components with two ControlNets wrapped in a MultiControlNetModel."""
        torch.manual_seed(0)
        UpperCAmelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        # Re-initialise the controlnet down-block weights so the two ControlNets differ.
        def init_weights(_snake_case : Optional[int]):
            if isinstance(_snake_case , torch.nn.Convad):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)
        UpperCAmelCase_ = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(_snake_case)
        torch.manual_seed(0)
        UpperCAmelCase_ = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(_snake_case)
        torch.manual_seed(0)
        UpperCAmelCase_ = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
        torch.manual_seed(0)
        UpperCAmelCase_ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        UpperCAmelCase_ = CLIPTextModel(_snake_case)
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        UpperCAmelCase_ = MultiControlNetModel([controlneta, controlneta])
        UpperCAmelCase_ = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def lowerCamelCase ( self : int , _snake_case : Union[str, Any] , _snake_case : str=0):
        """Build deterministic inputs with one control image per ControlNet."""
        if str(_snake_case).startswith('''mps'''):
            UpperCAmelCase_ = torch.manual_seed(_snake_case)
        else:
            UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
        ]
        UpperCAmelCase_ = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case)).to(_snake_case)
        UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64))
        UpperCAmelCase_ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def lowerCamelCase ( self : Optional[Any]):
        """Verify that varying control_guidance_start/end changes the output."""
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**_snake_case)
        pipe.to(_snake_case)
        UpperCAmelCase_ = 1_0.0
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case)[0]
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
    def lowerCamelCase ( self : Dict):
        """Check attention slicing produces (near-)identical outputs."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCamelCase ( self : int):
        """Check xFormers attention produces (near-)identical outputs (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def lowerCamelCase ( self : int):
        """Check a single-sample batch matches the unbatched result."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def lowerCamelCase ( self : Optional[int]):
        """save_pretrained is not implemented for MultiControlNet; it must not crash the suite."""
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**_snake_case)
        pipe.to(_snake_case)
        pipe.set_progress_bar_config(disable=_snake_case)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(_snake_case)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration test running the full canny-ControlNet img2img pipeline."""

    def lowerCamelCase ( self : Optional[int]):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase ( self : Optional[int]):
        """Run the real SD-1.5 + canny ControlNet pipeline and compare against a stored output."""
        # NOTE(review): locals are bound to ``UpperCAmelCase_`` while the code
        # references ``pipe``, ``output``, ``image`` and ``expected_image`` —
        # this method raises NameError as written.
        UpperCAmelCase_ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        UpperCAmelCase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case , controlnet=_snake_case)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=_snake_case)
        UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
        UpperCAmelCase_ = '''evil space-punk bird'''
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''').resize((512, 512))
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''').resize((512, 512))
        UpperCAmelCase_ = pipe(
            _snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        UpperCAmelCase_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''')
        assert np.abs(expected_image - image).max() < 9e-2
| 51
| 0
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowerCAmelCase ( __UpperCamelCase ):
    """Tests for ``Dataset.from_list`` (schema inference, missing columns, empty input)."""

    # NOTE(review): all six methods below share the name ``A_``, so each later
    # definition overrides the previous one, and several bodies reference
    # ``_create_example_records`` / ``_snake_case`` which are never defined —
    # in the upstream suite these were distinctly named test methods.
    def A_ ( self : Tuple ) -> List[str]:
        # Fixture: four records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def A_ ( self : Dict ) -> Dict:
        # Fixture: the same data in columnar form, built via from_dict.
        lowerCamelCase__ : Union[str, Any] = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
        return Dataset.from_dict(_snake_case )
    def A_ ( self : Tuple ) -> List[Any]:
        # from_list preserves column names and per-row contents.
        lowerCamelCase__ : Dict = self._create_example_records()
        lowerCamelCase__ : Optional[int] = Dataset.from_list(_snake_case )
        self.assertListEqual(dset.column_names , ['col_1', 'col_2'] )
        for i, r in enumerate(_snake_case ):
            self.assertDictEqual(_snake_case , example_records[i] )
    def A_ ( self : str ) -> str:
        # from_list and from_dict infer identical dataset info.
        lowerCamelCase__ : Optional[int] = self._create_example_records()
        lowerCamelCase__ : List[Any] = Dataset.from_list(_snake_case )
        lowerCamelCase__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def A_ ( self : Optional[Any] ) -> Union[str, Any]:  # checks what happens with missing columns
        lowerCamelCase__ : Optional[int] = [{'col_1': 1}, {'col_2': 'x'}]
        lowerCamelCase__ : Optional[Any] = Dataset.from_list(_snake_case )
        self.assertDictEqual(dset[0] , {'col_1': 1} )
        self.assertDictEqual(dset[1] , {'col_1': None} )  # NB: first record is used for columns
    def A_ ( self : Any ) -> List[Any]:  # checks if the type can be inferred from the second record
        lowerCamelCase__ : Tuple = [{'col_1': []}, {'col_1': [1, 2]}]
        lowerCamelCase__ : Dict = Dataset.from_list(_snake_case )
        self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64' ) ) )
    def A_ ( self : int ) -> Tuple:
        # An empty list yields an empty dataset with no columns.
        lowerCamelCase__ : Tuple = Dataset.from_list([] )
        self.assertEqual(len(_snake_case ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 50
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
def A (__A : bool , __A : bool ) -> Optional[Any]:
"""simple docstring"""
def run_func(__A : Optional[Any] ):
@wraps(__A )
def run_in_eager_mode(*__A : Dict , **__A : List[Any] ):
return func(*__A , **__A )
@wraps(__A )
@tf.function(experimental_compile=__A )
def run_in_graph_mode(*__A : Optional[Any] , **__A : Any ):
return func(*__A , **__A )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def A (batch_size : int , sequence_length : int , vocab_size : int ) -> ["tf.Tensor"]:
    """Build a tensor of random token ids with shape (batch_size, sequence_length)."""
    # NOTE(review): the original declared all three parameters as ``__A``
    # (duplicate argument names are a SyntaxError) and used ``tf.intaa``, which
    # does not exist; restored the names referenced in the body and ``tf.int32``.
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class __snake_case ( a ):
    """TensorFlow implementation of the benchmark runner: measures inference/training speed and memory."""

    # NOTE(review): throughout this class the method locals are all bound to
    # ``UpperCAmelCase_`` while later statements reference the original names
    # (``_inference``, ``_train``, ``config``, ``model``, ``input_ids`` ...),
    # so most methods raise NameError as written.  Code left byte-identical;
    # only documentation added.
    UpperCAmelCase__ : TensorFlowBenchmarkArguments
    UpperCAmelCase__ : PretrainedConfig
    UpperCAmelCase__ : str = "TensorFlow"
    @property
    def lowerCamelCase ( self : List[str]):
        """Return the installed TensorFlow version string."""
        return tf.__version__
    def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure inference speed for the given model / batch size / sequence length."""
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
        return self._measure_speed(_inference)
    def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure training speed for the given model / batch size / sequence length."""
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
        return self._measure_speed(_train)
    def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure peak memory during inference."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
        return self._measure_memory(_inference)
    def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure peak memory during training."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
        return self._measure_memory(_train)
    def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : int):
        """Build and return a zero-argument inference callable for the named model."""
        UpperCAmelCase_ = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        # Prefer the concrete architecture class from the config when available.
        UpperCAmelCase_ = (
            hasattr(_snake_case , '''architectures''')
            and isinstance(config.architectures , _snake_case)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                UpperCAmelCase_ = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
                UpperCAmelCase_ = getattr(_snake_case , _snake_case)
                UpperCAmelCase_ = model_cls(_snake_case)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](_snake_case)
        # encoder-decoder has vocab size saved differently
        UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
        UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_forward():
            return model(_snake_case , decoder_input_ids=_snake_case , training=_snake_case)
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_forward():
            return model(_snake_case , training=_snake_case)
        UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
        """Build and return a zero-argument training-step callable (forward + gradients)."""
        UpperCAmelCase_ = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
        if self.args.fpaa:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        UpperCAmelCase_ = (
            hasattr(_snake_case , '''architectures''')
            and isinstance(config.architectures , _snake_case)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                UpperCAmelCase_ = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
                UpperCAmelCase_ = getattr(_snake_case , _snake_case)
                UpperCAmelCase_ = model_cls(_snake_case)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_snake_case)
        # encoder-decoder has vocab size saved differently
        UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
        UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_decoder_train():
            UpperCAmelCase_ = model(_snake_case , decoder_input_ids=_snake_case , labels=_snake_case , training=_snake_case)[0]
            UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
        def encoder_train():
            UpperCAmelCase_ = model(_snake_case , labels=_snake_case , training=_snake_case)[0]
            UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
            return gradients
        UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def lowerCamelCase ( self : Any , _snake_case : Optional[Any]):
        """Time the given callable with timeit and return the best average runtime in seconds."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
                    timeit.repeat(_snake_case , repeat=1 , number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                UpperCAmelCase_ = timeit.repeat(
                    _snake_case , repeat=self.args.repeat , number=10 , )
                return min(_snake_case) / 1_0.0
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""")
    def lowerCamelCase ( self : Dict , _snake_case : Callable[[], None]):
        """Run the given callable and return (peak memory, optional line-by-line summary)."""
        logger.info(
            '''Note that TensorFlow allocates more memory than '''
            '''it might need to speed up computation. '''
            '''The memory reported here corresponds to the memory '''
            '''reported by `nvidia-smi`, which can vary depending '''
            '''on total available memory on the GPU that is used.''')
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
                            ''' consumption line by line.''')
                    UpperCAmelCase_ = start_memory_tracing('''transformers''')
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
                        ''' with `args.memory=False`''')
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            '''py3nvml not installed, we won\'t log GPU memory usage. '''
                            '''Install py3nvml (pip install py3nvml) to log information about GPU.''')
                        UpperCAmelCase_ = '''N/A'''
                    else:
                        logger.info(
                            '''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
                            ''' running on the same GPU.''')
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(_snake_case)
                        UpperCAmelCase_ = meminfo.used
                        UpperCAmelCase_ = Memory(_snake_case)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
                            ''' TensorFlow.''')
                        UpperCAmelCase_ = None
                    else:
                        UpperCAmelCase_ = measure_peak_memory_cpu(_snake_case)
                        UpperCAmelCase_ = Memory(_snake_case) if isinstance(_snake_case , _snake_case) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    UpperCAmelCase_ = stop_memory_tracing(_snake_case)
                    if memory is None:
                        UpperCAmelCase_ = summary.total
                else:
                    UpperCAmelCase_ = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""")
                return "N/A", None
| 51
| 0
|
import unittest
import numpy as np
def lowerCAmelCase_ ( _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : np.ndarray , _lowercase : np.ndarray | None = None , ) -> np.ndarray:
"""simple docstring"""
a__ : Any = np.shape(__A)
a__ : str = np.shape(__A)
a__ : Union[str, Any] = np.shape(__A)
if shape_a[0] != shape_b[0]:
a__ : Optional[Any] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(__A)
if shape_b[1] != shape_c[1]:
a__ : Union[str, Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(__A)
a__ : Optional[Any] = pseudo_inv
if a_inv is None:
try:
a__ : str = np.linalg.inv(__A)
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""")
return mat_c - mat_b.T @ a_inv @ mat_b
class snake_case__ (unittest.TestCase ):
    """Unit tests for the Schur-complement function (determinant identity and shape checks)."""

    # NOTE(review): all three methods share the name ``SCREAMING_SNAKE_CASE__``
    # (later definitions override earlier ones), they call ``schur_complement``
    # while the function in this file is named ``lowerCAmelCase_``, and the
    # locals are all bound to ``a__`` while later lines reference ``a``, ``b``
    # and ``c`` — these tests raise NameError as written.
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Check det([[A,B],[B.T,C]]) == det(A) * det(Schur complement)."""
        a__ : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        a__ : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
        a__ : str = np.array([[2, 1], [6, 3]] )
        a__ : str = schur_complement(_snake_case , _snake_case , _snake_case )
        a__ : Optional[Any] = np.block([[a, b], [b.T, c]] )
        a__ : Optional[Any] = np.linalg.det(_snake_case )
        a__ : Union[str, Any] = np.linalg.det(_snake_case )
        a__ : Union[str, Any] = np.linalg.det(_snake_case )
        self.assertAlmostEqual(_snake_case , det_a * det_s )
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Mismatched A/B row counts must raise ValueError."""
        a__ : List[str] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        a__ : Union[str, Any] = np.array([[0, 3], [3, 0], [2, 3]] )
        a__ : Optional[int] = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(_snake_case ):
            schur_complement(_snake_case , _snake_case , _snake_case )
    def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
        """Mismatched B/C column counts must raise ValueError."""
        a__ : List[str] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        a__ : str = np.array([[0, 3], [3, 0], [2, 3]] )
        a__ : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(_snake_case ):
            schur_complement(_snake_case , _snake_case , _snake_case )
if __name__ == "__main__":
    # Run embedded doctests first, then the unittest suite above.
    import doctest

    doctest.testmod()
    unittest.main()
| 170
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
# Fallback stand-in used when the vision dependencies (PIL) are unavailable:
# a class exposing a no-op static method so module import still succeeds.
class __snake_case :
    @staticmethod
    def lowerCamelCase ( *_snake_case : Optional[int] , **_snake_case : int):
        """No-op placeholder; accepts and ignores any arguments."""
        pass
def A (image : "Image" ) -> str:
    """Return the hex MD5 digest of an image's raw bytes (used to fingerprint pipeline outputs)."""
    # NOTE(review): the original called ``hashlib.mda`` (no such function) and
    # declared the parameter as ``__A`` while the body referenced ``image``;
    # both fixed.  The annotation is quoted so the module imports even when
    # PIL's ``Image`` is not bound.
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase ):
    """Pipeline tests for depth estimation (output schema, batching, DPT integration)."""

    UpperCAmelCase__ : Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[str]):
        """Build a DepthEstimationPipeline plus sample image paths for the common pipeline tests."""
        UpperCAmelCase_ = DepthEstimationPipeline(model=_snake_case , image_processor=_snake_case)
        # NOTE(review): the pipeline is bound to ``UpperCAmelCase_`` but the
        # return statement references ``depth_estimator`` — NameError as written.
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def lowerCamelCase ( self : str , _snake_case : Optional[int] , _snake_case : List[str]):
        """Check output schema for single images and for batched mixed-mode inputs."""
        UpperCAmelCase_ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)} , _snake_case)
        import datasets

        UpperCAmelCase_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''')
        UpperCAmelCase_ = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ])
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ] , _snake_case , )
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''')
    def lowerCamelCase ( self : Union[str, Any]):
        """TF has no depth-estimation support; skipped."""
        pass
    @slow
    @require_torch
    def lowerCamelCase ( self : List[str]):
        """Integration test against the real Intel/dpt-large checkpoint."""
        UpperCAmelCase_ = '''Intel/dpt-large'''
        UpperCAmelCase_ = pipeline('''depth-estimation''' , model=_snake_case)
        UpperCAmelCase_ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        UpperCAmelCase_ = hashimage(outputs['''depth'''])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()) , 2_9.3_0_4)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()) , 2.6_6_2)
    @require_torch
    def lowerCamelCase ( self : Optional[Any]):
        """No tiny GLPN/DPT test model exists yet; skip."""
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 51
| 0
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def A_ ( ) -> Dict:
    '''CONNECTION_TIMES_OUT simulation: requests must raise a timeout-style error.'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        # NOTE(review): ``__A`` is undefined here — upstream this expected
        # ``requests.exceptions.ConnectTimeout``.
        with pytest.raises(__A ):
            requests.request('''GET''' , '''https://huggingface.co''' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    """In CONNECTION_FAILS mode every outgoing request must raise
    requests' ConnectionError immediately."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled() -> None:
    """With HF_DATASETS_OFFLINE=1 the HTTP helpers must refuse to go online
    and raise a ConnectionError (was a NameError: the expected exception
    class was an undefined placeholder)."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 328
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy import structure: maps submodule name -> names it exports. The
# original code rebound a single variable instead of inserting into this
# dict, and never registered the lazy module in sys.modules, so lazy
# loading was completely broken.
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static type checkers see the real symbols.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes are imported on
    # first access based on _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A__ :
    # Intentionally empty: this class exists only to exercise the
    # `require_onnxruntime` decorator (it must apply cleanly to a class).
    pass
| 162
|
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


# Lazy import structure: submodule name -> exported names. The original
# code rebound a plain variable for each optional group and never
# installed the _LazyModule into sys.modules, breaking lazy loading.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class __lowercase(unittest.TestCase):
    """Round-trip tests for the Tatoeba -> Marian converter.

    Requires a local Tatoeba checkout at DEFAULT_REPO (was a NameError:
    the skip condition referenced an undefined `_lowercase` placeholder).
    """

    @cached_property
    def resolver(self):
        # Must be named `resolver`: the tests below access `self.resolver`.
        # Convert into a throwaway directory so repeated runs don't collide.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        # dry_run=True: render the card without writing model files.
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 318
|
def A(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search ``list_data`` for ``key`` from both ends at once.

    Checks index ``left`` and index ``right``, then recurses inward.
    Returns the index of ``key`` or -1 if it is absent.

    (The original declared all four parameters with the same name — a
    SyntaxError — and referenced undefined identifiers in the body.)

    :param list_data: sequence to search
    :param key: value to find
    :param left: lowest index still to check (default 0)
    :param right: highest index still to check; 0 means "use len-1"
    """
    # A falsy `right` (first call) is replaced by the last valid index.
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return A(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 51
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __lowerCamelCase(PretrainedConfig):
    """Configuration for SEW-D speech models.

    Restored from the canonical SEW-D configuration: the original declared
    every __init__ parameter with the same name (a SyntaxError), referenced
    undefined `_snake_case` placeholders, and inherited from an undefined
    `_a` instead of the imported PretrainedConfig.
    """

    _lowercase = '''sew-d'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv descriptions must have one entry per extractor layer.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect.'
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def snake_case_(self):
        """Total stride of the feature-extractor convolution stack."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 310
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : str = {}
class __snake_case(PretrainedConfig):
    """Configuration for LLaMA models.

    Restored: the original declared every __init__ parameter with the same
    name (a SyntaxError), called `self._rope_scaling_validation()` although
    the method was named `lowerCamelCase`, used undefined `_snake_case`
    placeholders where `None`/`dict`/`float` belong, and inherited from an
    undefined `a` instead of the imported PretrainedConfig.
    """

    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": linear|dynamic, "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                F"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get('''type''', None)
        rope_scaling_factor = self.rope_scaling.get('''factor''', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 51
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 241
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
snake_case_ : List[str] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __snake_case(PretrainedConfig):
    """Configuration for CodeGen models.

    Restored: the original declared every __init__ parameter with the same
    name (a SyntaxError), defined both class attributes under one shadowed
    name, and inherited from an undefined `a` instead of PretrainedConfig.
    """

    model_type = '''codegen'''
    # Canonical-name aliases expected by the shared config machinery.
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
# NOTE(review): this class reuses the name of the config class above and
# shadows it at module scope; the name is kept to avoid breaking callers.
class __snake_case(OnnxConfigWithPast):
    """ONNX export configuration for CodeGen.

    Restored: all five methods/properties were named `lowerCamelCase`
    (shadowing each other), the attention-mask and past-key-values entries
    were assigned to throwaway locals, and the __init__ parameters shared
    one name (a SyntaxError).
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, '''pad_token_id''', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the model inputs for ONNX export."""
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (and zero-filled past_key_values if enabled)."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch

                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            # Extend the mask to cover the injected past positions.
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 51
| 0
|
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number P_k = k(3k-1)/2.

    Inverts the pentagonal formula: n is pentagonal iff (1+sqrt(1+24n))/6
    is an integer. (The original defined both functions under one shadowed
    name, so this helper was unreachable.)
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Project Euler 44: find D = |P_j - P_k| where both the sum and the
    difference of the pentagonal pair are pentagonal.

    Scans pairs among the first ``limit`` pentagonal numbers and returns
    the first qualifying difference, or -1 if none exists in range.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 211
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for PhoBERT.

    Restored: the mixin hook names (`setUp`, `tokenizer_class`, ...) were
    all obfuscated into shadowed duplicates, `_snake_case` placeholders
    were undefined, and the mixin base was an undefined `a`.
    """

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny vocab/merges pair for the tokenizer under test."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l à</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])

        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            for token in vocab_tokens:
                fp.write(F"""{token} {vocab_tokens[token]}\n""")
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''Tôi là VinAI Research'''
        output_text = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''Tôi là VinAI Research'''
        bpe_tokens = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 51
| 0
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCamelCase : Optional[Any] = 3
def primitive_root(p_val: int) -> int:
    """Randomly search for a primitive root modulo the prime ``p_val``.

    (All four functions in this file were obfuscated to one shadowed name,
    so none of the cross-calls resolved; canonical names are restored.)
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        # Reject candidates whose order divides trivially.
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    """Generate an ElGamal (public, private) key pair of ``key_size`` bits.

    Returns ((key_size, g, y, p), (key_size, x)) with y = inverse(g^x mod p).
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_a_inv = cryptomath.find_mod_inverse(pow(e_a, d, p), p)

    public_key = (key_size, e_a, e_a_inv, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``<name>_pubkey.txt`` / ``<name>_privkey.txt``; abort if either exists."""
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print("\nWARNING:")
        print(
            f'\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n'
            "Use a different name or delete these files and re-run this program.")
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', "w") as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', "w") as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


def main() -> None:
    """Entry point: generate a 2048-bit ElGamal key pair on disk."""
    print("Making key files...")
    make_key_files("elgamal", 2_0_4_8)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 18
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Optional[int] = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (all map-style or all iterable) into one.

    Raises ValueError for an empty list, a DatasetDict element, mixed
    dataset kinds, or an unknown stopping strategy. (Renamed from `A`,
    which collided with the concatenation helper below.)
    """
    # Local imports avoid a circular dependency at module load time.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']""")
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.""")
        if i == 0:
            # Fix the expected kind from the first element; all others must match.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets (all map-style or all iterable) into one.

    Validation mirrors `interleave_datasets`; error texts are kept verbatim.
    (Renamed from `A`, which shadowed the interleave helper above.)
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset)}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']""")
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.""")
        if i == 0:
            # Fix the expected kind from the first element; all others must match.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 51
| 0
|
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig for ``model_name`` (size + label mapping).

    Restored: the original assigned `num_labels`/`id2label` to throwaway
    locals instead of config attributes and used undefined placeholders.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = """huggingface/label-files"""
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = """kinetics400-id2label.json"""
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = """something-something-v2-id2label.json"""
        else:
            raise ValueError("""Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.""")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    """Set encoder/decoder sizes on ``config`` based on the size tag in ``model_name``.

    "base" keeps the defaults; any other unrecognized size is an error.
    Restored: the original declared both parameters under one name (a
    SyntaxError) and assigned every value to a local instead of ``config``.
    """
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""")
def rename_key(name):
    """Map an original VideoMAE checkpoint key to its HF-Transformers name.

    Restored: each `replace` result was assigned to a throwaway local, so
    the function returned its input unchanged. Rule order matters — e.g.
    the bias-only "attn" rule must run before the generic one.
    """
    if "encoder." in name:
        name = name.replace("""encoder.""", """""")
    if "cls_token" in name:
        name = name.replace("""cls_token""", """videomae.embeddings.cls_token""")
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""", """decoder.decoder_pos_embed""")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""", """videomae.embeddings.position_embeddings""")
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """videomae.embeddings.patch_embeddings.projection""")
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""", """videomae.embeddings.norm""")
    if "decoder.blocks" in name:
        name = name.replace("""decoder.blocks""", """decoder.decoder_layers""")
    if "blocks" in name:
        name = name.replace("""blocks""", """videomae.encoder.layer""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name and "bias" not in name:
        name = name.replace("""attn""", """attention.self""")
    if "attn" in name:
        name = name.replace("""attn""", """attention.attention""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""", """decoder.decoder_embed""")
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""", """decoder.decoder_norm""")
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""", """decoder.decoder_pred""")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("""norm.weight""", """videomae.layernorm.weight""")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("""norm.bias""", """videomae.layernorm.bias""")
    if "head" in name and "decoder" not in name:
        name = name.replace("""head""", """classifier""")

    return name
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[str] ) -> Tuple:
    """Remap the keys of a raw VideoMAE checkpoint state dict to the HF layout,
    splitting fused ``qkv`` projections into query/key/value slices.

    NOTE(review): both parameters share the name ``lowercase__`` (a SyntaxError) and the
    body reads ``orig_state_dict`` / ``config`` / ``key_split`` / ``val`` instead -- the
    original signature was presumably ``(orig_state_dict, config)``; confirm against the
    upstream conversion script. The sliced q/k/v tensors are also never written back
    into the dict here (the left-hand sides look mechanically renamed), so the function
    as written would return an emptied dict -- verify before running.
    """
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase_ :Any = orig_state_dict.pop(__A )
        if key.startswith("""encoder.""" ):
            lowerCAmelCase_ :Dict = key.replace("""encoder.""" , """""" )
        if "qkv" in key:
            lowerCAmelCase_ :List[str] = key.split(""".""" )
            if key.startswith("""decoder.blocks""" ):
                # decoder layers use the (smaller) decoder hidden size
                lowerCAmelCase_ :Optional[Any] = config.decoder_hidden_size
                lowerCAmelCase_ :Tuple = int(key_split[2] )
                lowerCAmelCase_ :List[Any] = """decoder.decoder_layers."""
                if "weight" in key:
                    # fused qkv weight -> query / key / value slices
                    lowerCAmelCase_ :Optional[Any] = val[:dim, :]
                    lowerCAmelCase_ :List[str] = val[dim : dim * 2, :]
                    lowerCAmelCase_ :List[str] = val[-dim:, :]
            else:
                # encoder layers use the model hidden size
                lowerCAmelCase_ :Dict = config.hidden_size
                lowerCAmelCase_ :Any = int(key_split[1] )
                lowerCAmelCase_ :Union[str, Any] = """videomae.encoder.layer."""
                if "weight" in key:
                    lowerCAmelCase_ :Any = val[:dim, :]
                    lowerCAmelCase_ :Union[str, Any] = val[dim : dim * 2, :]
                    lowerCAmelCase_ :Optional[int] = val[-dim:, :]
        else:
            lowerCAmelCase_ :Union[str, Any] = val
    return orig_state_dict
def _snake_case() -> list:
    """Download the sample 'eating spaghetti' clip from the HF Hub and load it.

    Returns:
        list: the video frames stored in ``eating_spaghetti.npy`` (one numpy
        array per frame).
    """
    # Fix: the original body passed the undefined placeholder name ``__A`` to
    # ``np.load``/``list`` instead of the just-computed locals (NameError at runtime).
    file_path = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file_path)
    return list(video)
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
    """Convert an original VideoMAE checkpoint (Google Drive) to the HF format,
    verify its logits (and loss, where applicable) against hard-coded expected
    values, and optionally save / push the converted model.

    NOTE(review): all four parameters share the name ``lowercase__`` (a SyntaxError) and
    the body reads ``model_name`` / ``files`` / ``model`` / ``outputs`` / ``config`` /
    ``pytorch_dump_folder_path`` / ``push_to_hub`` plus the undefined placeholder ``__A``
    -- the original signature was presumably
    ``(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub)``; restore the
    mangled local names before running.
    """
    lowerCAmelCase_ :Union[str, Any] = get_videomae_config(__A )
    # finetuned checkpoints carry a classification head; pretrained ones the MAE decoder
    if "finetuned" in model_name:
        lowerCAmelCase_ :Dict = VideoMAEForVideoClassification(__A )
    else:
        lowerCAmelCase_ :str = VideoMAEForPreTraining(__A )
    # download original checkpoint, hosted on Google Drive
    lowerCAmelCase_ :Optional[int] = """pytorch_model.bin"""
    gdown.cached_download(__A , __A , quiet=__A )
    lowerCAmelCase_ :Tuple = torch.load(__A , map_location="""cpu""" )
    if "model" in files:
        lowerCAmelCase_ :Optional[int] = files["""model"""]
    else:
        lowerCAmelCase_ :List[Any] = files["""module"""]
    lowerCAmelCase_ :List[Any] = convert_state_dict(__A , __A )
    model.load_state_dict(__A )
    model.eval()
    # verify model on basic input
    lowerCAmelCase_ :List[str] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    lowerCAmelCase_ :Tuple = prepare_video()
    lowerCAmelCase_ :List[str] = image_processor(__A , return_tensors="""pt""" )
    if "finetuned" not in model_name:
        # pretraining checkpoints need the fixed boolean mask used when the
        # expected values below were recorded
        lowerCAmelCase_ :List[Any] = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
        lowerCAmelCase_ :Optional[Any] = torch.load(__A )
    lowerCAmelCase_ :int = model(**__A )
    lowerCAmelCase_ :int = outputs.logits
    lowerCAmelCase_ :List[Any] = [
        """videomae-small-finetuned-kinetics""",
        """videomae-small-finetuned-ssv2""",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        """videomae-base-short""",
        """videomae-base-short-finetuned-kinetics""",
        """videomae-base""",
        """videomae-base-finetuned-kinetics""",
        """videomae-large""",
        """videomae-large-finetuned-kinetics""",
        """videomae-huge-finetuned-kinetics""",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        """videomae-base-short-ssv2""",
        """videomae-base-short-finetuned-ssv2""",
        """videomae-base-ssv2""",
        """videomae-base-finetuned-ssv2""",
    ]
    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        lowerCAmelCase_ :Optional[int] = torch.Size([1, 4_0_0] )
        lowerCAmelCase_ :Tuple = torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 1_7_4] )
        lowerCAmelCase_ :str = torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowerCAmelCase_ :List[str] = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        lowerCAmelCase_ :Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowerCAmelCase_ :List[Any] = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        lowerCAmelCase_ :Union[str, Any] = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        lowerCAmelCase_ :List[Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowerCAmelCase_ :Union[str, Any] = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        lowerCAmelCase_ :Optional[Any] = torch.Size([1, 4_0_0] )
        lowerCAmelCase_ :Union[str, Any] = torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        lowerCAmelCase_ :List[str] = torch.Size([1, 4_0_0] )
        lowerCAmelCase_ :Dict = torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        lowerCAmelCase_ :str = torch.Size([1, 4_0_0] )
        lowerCAmelCase_ :Optional[Any] = torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        lowerCAmelCase_ :Any = torch.Size([1, 4_0_0] )
        lowerCAmelCase_ :Optional[Any] = torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        lowerCAmelCase_ :Tuple = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowerCAmelCase_ :Tuple = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        lowerCAmelCase_ :List[Any] = torch.Size([1, 1_7_4] )
        lowerCAmelCase_ :List[str] = torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        lowerCAmelCase_ :Tuple = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        lowerCAmelCase_ :int = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        lowerCAmelCase_ :int = torch.Size([1, 1_7_4] )
        lowerCAmelCase_ :Union[str, Any] = torch.tensor([0.1961, -0.8337, -0.6389] )
    else:
        raise ValueError(f"""Model name not supported. Should be one of {model_names}""" )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , __A , atol=1E-4 )
    else:
        print("""Logits:""" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , __A , atol=1E-4 )
    print("""Logits ok!""" )
    # verify loss, if applicable
    if model_name == "videomae-base-short":
        lowerCAmelCase_ :List[Any] = outputs.loss
        assert torch.allclose(__A , __A , atol=1E-4 )
        print("""Loss ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(__A )
        model.save_pretrained(__A )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        model.push_to_hub(__A , organization="""nielsr""" )
if __name__ == "__main__":
    # CLI entry point for the VideoMAE checkpoint conversion.
    # NOTE(review): the parser is bound to ``__UpperCAmelCase`` but later lines read
    # ``parser`` / ``args``, and ``convert_videomae_checkpoint`` is not defined in this
    # file (the conversion function above is named ``_snake_case``) -- the names look
    # mechanically mangled; restore them before running.
    __UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
            ' download link.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/Users/nielsrogge/Documents/VideoMAE/Test',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    __UpperCAmelCase = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 84
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the framework tag used as ``return_tensors=...`` in the tokenizer tests,
# preferring PyTorch, then TensorFlow, falling back to JAX.
# Fix: the original annotated these module-level assignments with ``Optional[Any]``
# and ``Union[str, Any]``, but only ``Tuple`` is imported from ``typing`` here, so
# evaluating the annotations raised NameError; the annotations are dropped.
if is_torch_available():
    snake_case_ = "pt"
elif is_tf_available():
    snake_case_ = "tf"
else:
    snake_case_ = "jax"
class __snake_case ( a , unittest.TestCase ):
    """Test suite for the byte-level ByT5 tokenizer.

    NOTE(review): throughout this class the targets of assignments have been renamed to
    the single name ``UpperCAmelCase_`` while later lines still read the original local
    names (``tokenizer``, ``toks``, ``batch`` ...), parameters are all named
    ``_snake_case``, and the base class ``a`` is undefined (presumably
    ``TokenizerTesterMixin``) -- this looks like mechanical mangling; restore the
    original names before running.
    """

    # tokenizer class under test / whether a Rust ("fast") counterpart exists
    UpperCAmelCase__ : List[Any] = ByTaTokenizer
    UpperCAmelCase__ : int = False

    def lowerCamelCase ( self : Optional[int]):
        """Create a fresh ByT5 tokenizer and persist it to the mixin's temp dir."""
        super().setUp()
        UpperCAmelCase_ = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def lowerCamelCase ( self : Tuple):
        """Return the pretrained google/byt5-small tokenizer (cached per instance)."""
        return ByTaTokenizer.from_pretrained('''google/byt5-small''')

    def lowerCamelCase ( self : List[str] , **_snake_case : Union[str, Any]):
        """Instantiate a tokenizer from the temp dir, forwarding any kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case)

    def lowerCamelCase ( self : Dict , _snake_case : int , _snake_case : Tuple=False , _snake_case : Dict=20 , _snake_case : Optional[Any]=5):
        """Build a clean (decodable, round-trippable) text/ids pair for testing.

        NOTE(review): all four parameters share the name ``_snake_case`` (a SyntaxError);
        the original signature was presumably
        ``(tokenizer, with_prefix_space=False, max_length=20, min_length=5)``.
        """
        UpperCAmelCase_ = []
        for i in range(len(_snake_case)):
            try:
                UpperCAmelCase_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_snake_case)
            except UnicodeDecodeError:
                # some single byte ids are not valid UTF-8 on their own
                pass
            toks.append((i, tok))
        # keep only ASCII-alpha tokens that round-trip to a single id
        UpperCAmelCase_ = list(filter(lambda _snake_case: re.match(r'''^[ a-zA-Z]+$''' , t[1]) , _snake_case))
        UpperCAmelCase_ = list(filter(lambda _snake_case: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_snake_case) , _snake_case))
        if max_length is not None and len(_snake_case) > max_length:
            UpperCAmelCase_ = toks[:max_length]
        if min_length is not None and len(_snake_case) < min_length and len(_snake_case) > 0:
            while len(_snake_case) < min_length:
                UpperCAmelCase_ = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase_ = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase_ = tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case)
        if " " not in output_txt and len(_snake_case) > 1:
            UpperCAmelCase_ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_snake_case)
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_snake_case)
            )
        if with_prefix_space:
            UpperCAmelCase_ = ''' ''' + output_txt
        UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
        return output_txt, output_ids

    def lowerCamelCase ( self : Union[str, Any]):
        """Writing </s> explicitly must produce the same ids as the automatic EOS."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
        UpperCAmelCase_ = tokenizer(['''hi''', '''I went to the gym''', ''''''])
        self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''])

    def lowerCamelCase ( self : str):
        """Multibyte (non-ASCII) characters must encode/decode losslessly."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = '''Unicode €.'''
        UpperCAmelCase_ = tokenizer(_snake_case)
        UpperCAmelCase_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''] , _snake_case)
        # decoding
        UpperCAmelCase_ = tokenizer.decode(_snake_case)
        self.assertEqual(_snake_case , '''Unicode €.</s>''')
        UpperCAmelCase_ = tokenizer('''e è é ê ë''')
        UpperCAmelCase_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''] , _snake_case)
        # decoding
        UpperCAmelCase_ = tokenizer.decode(_snake_case)
        self.assertEqual(_snake_case , '''e è é ê ë</s>''')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''e è é ê ë</s>''')

    def lowerCamelCase ( self : Any):
        """Batched tokenization returns framework tensors with padded shapes."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
        self.assertIsInstance(_snake_case , _snake_case)
        if FRAMEWORK != "jax":
            UpperCAmelCase_ = list(batch.input_ids.numpy()[0])
        else:
            UpperCAmelCase_ = list(batch.input_ids.tolist()[0])
        self.assertListEqual(_snake_case , _snake_case)
        self.assertEqual((2, 37) , batch.input_ids.shape)
        self.assertEqual((2, 37) , batch.attention_mask.shape)

    def lowerCamelCase ( self : Optional[Any]):
        """Encoder-only call must not emit any decoder inputs."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , _snake_case)
        self.assertIn('''attention_mask''' , _snake_case)
        self.assertNotIn('''decoder_input_ids''' , _snake_case)
        self.assertNotIn('''decoder_attention_mask''' , _snake_case)

    def lowerCamelCase ( self : Tuple):
        """``max_length`` padding on targets must yield fixed-width label tensors."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        UpperCAmelCase_ = tokenizer(
            text_target=_snake_case , max_length=32 , padding='''max_length''' , truncation=_snake_case , return_tensors=_snake_case)
        self.assertEqual(32 , targets['''input_ids'''].shape[1])

    def lowerCamelCase ( self : int):
        """Source/target pairs must encode to the expected input_ids and labels."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = ['''A long paragraph for summarization. </s>''']
        UpperCAmelCase_ = ['''Summary of the text. </s>''']
        # fmt: off
        UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        UpperCAmelCase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        UpperCAmelCase_ = tokenizer(_snake_case , text_target=_snake_case)
        self.assertEqual(_snake_case , batch['''input_ids'''][0])
        self.assertEqual(_snake_case , batch['''labels'''][0])

    def lowerCamelCase ( self : Tuple):
        """save_pretrained / from_pretrained must round-trip tokens and config."""
        UpperCAmelCase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length , 42)
        # Now let's start the test
        UpperCAmelCase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase_ = tempfile.mkdtemp()
                UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
                UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                tokenizer.save_pretrained(_snake_case)
                UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
                UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                self.assertListEqual(_snake_case , _snake_case)
                shutil.rmtree(_snake_case)
        UpperCAmelCase_ = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase_ = tempfile.mkdtemp()
                UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''])
                UpperCAmelCase_ = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''')
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
                UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                tokenizer.save_pretrained(_snake_case)
                UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
                UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                self.assertListEqual(_snake_case , _snake_case)
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length , 42)
                UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=43)
                self.assertEqual(tokenizer.model_max_length , 43)
                shutil.rmtree(_snake_case)

    def lowerCamelCase ( self : List[Any]):
        """Hand-edited special_tokens_map.json / tokenizer_config.json must be honored."""
        UpperCAmelCase_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_snake_case)
                with open(os.path.join(_snake_case , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
                    UpperCAmelCase_ = json.load(_snake_case)
                with open(os.path.join(_snake_case , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
                    UpperCAmelCase_ = json.load(_snake_case)
                UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(125)]
                UpperCAmelCase_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                UpperCAmelCase_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(_snake_case , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
                    json.dump(_snake_case , _snake_case)
                with open(os.path.join(_snake_case , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
                    json.dump(_snake_case , _snake_case)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase_ = tokenizer_class.from_pretrained(
                    _snake_case , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_snake_case)]
                UpperCAmelCase_ = tokenizer_class.from_pretrained(
                    _snake_case , additional_special_tokens=_snake_case , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )

    def lowerCamelCase ( self : Any):
        """Decoding the out-of-range byte id 255 must yield the empty string."""
        UpperCAmelCase_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_snake_case)
                UpperCAmelCase_ = tokenizer_class.from_pretrained(_snake_case)
                self.assertTrue(tokenizer.decode([255]) == '''''')

    def lowerCamelCase ( self : int):
        """Intentionally a no-op -- presumably overrides a mixin test that does not apply to ByT5."""
        pass

    def lowerCamelCase ( self : Optional[int]):
        """Intentionally a no-op -- presumably overrides a mixin test that does not apply to ByT5."""
        pass

    def lowerCamelCase ( self : Dict):
        """Intentionally a no-op -- presumably overrides a mixin test that does not apply to ByT5."""
        pass

    def lowerCamelCase ( self : List[Any]):
        """Intentionally a no-op -- presumably overrides a mixin test that does not apply to ByT5."""
        pass

    def lowerCamelCase ( self : Tuple):
        """convert_tokens_to_string must join byte tokens back into a str."""
        UpperCAmelCase_ = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                UpperCAmelCase_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
                UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
                self.assertIsInstance(_snake_case , _snake_case)

    def lowerCamelCase ( self : Union[str, Any]):
        """Special-token setters (token <-> id) must stay consistent in both directions."""
        UpperCAmelCase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                UpperCAmelCase_ = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                UpperCAmelCase_ = 0
                UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(
                    _snake_case , skip_special_tokens=_snake_case)
                for attr in attributes_list:
                    setattr(_snake_case , attr + '''_id''' , _snake_case)
                    self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
                    self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
                    setattr(_snake_case , attr + '''_id''' , _snake_case)
                    self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
                    self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
                setattr(_snake_case , '''additional_special_tokens_ids''' , [])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [])
                setattr(_snake_case , '''additional_special_tokens_ids''' , [token_id_to_test_setters])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [token_to_test_setters])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [token_id_to_test_setters])
| 51
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
# Guard against fairseq versions the conversion was not written for.
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
    raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
    raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
# NOTE(review): the next three assignments rebind the SAME name, so only "en_XX"
# survives at import time; originally these were presumably ``logger`` /
# ``SAMPLE_TEXT`` / ``SAMPLE_LANGUAGE`` -- confirm against the upstream script.
UpperCamelCase__: List[Any] = logging.get_logger(__name__)
UpperCamelCase__: Any = "Hello, World!"
UpperCamelCase__: Tuple = "en_XX"
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool ) -> Dict:
    """Copy an original fairseq X-MOD checkpoint into an HF ``XmodForMaskedLM`` (or
    ``XmodForSequenceClassification`` when a classification head is requested),
    verify both models produce the same output tensor, and save the result.

    NOTE(review): the three parameters share the name ``_lowerCAmelCase`` (a SyntaxError)
    and the body reads ``classification_head`` / ``pytorch_dump_folder_path`` / ``xmod``
    plus the undefined placeholder ``__A`` -- the original signature was presumably
    ``(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head)``; restore
    the mangled local names before running.
    """
    UpperCAmelCase : Optional[Any] = Path('''data_bin''' )
    UpperCAmelCase : Any = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(__A ).parent ) , checkpoint_file=Path(__A ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(__A ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(__A ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
    xmod.eval() # disable dropout
    print(__A )
    UpperCAmelCase : Dict = xmod.model.encoder.sentence_encoder
    # Build the HF config mirroring the fairseq model's hyperparameters.
    UpperCAmelCase : Dict = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        UpperCAmelCase : Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , __A )
    UpperCAmelCase : Dict = XmodForSequenceClassification(__A ) if classification_head else XmodForMaskedLM(__A )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    UpperCAmelCase : Optional[int] = xmod_sent_encoder.embed_tokens.weight
    UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.embed_positions.weight
    UpperCAmelCase : Optional[int] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    UpperCAmelCase : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
    UpperCAmelCase : Tuple = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        UpperCAmelCase : Dict = model.roberta.encoder.layer[i]
        UpperCAmelCase : str = xmod_sent_encoder.layers[i]
        # self attention
        UpperCAmelCase : Optional[int] = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''' )
        UpperCAmelCase : Optional[Any] = xmod_layer.self_attn.q_proj.weight
        UpperCAmelCase : Optional[int] = xmod_layer.self_attn.q_proj.bias
        UpperCAmelCase : List[str] = xmod_layer.self_attn.k_proj.weight
        UpperCAmelCase : Dict = xmod_layer.self_attn.k_proj.bias
        UpperCAmelCase : List[Any] = xmod_layer.self_attn.v_proj.weight
        UpperCAmelCase : Dict = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        UpperCAmelCase : Dict = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
        UpperCAmelCase : Optional[Any] = xmod_layer.self_attn.out_proj.weight
        UpperCAmelCase : Optional[Any] = xmod_layer.self_attn.out_proj.bias
        UpperCAmelCase : Optional[int] = xmod_layer.self_attn_layer_norm.weight
        UpperCAmelCase : str = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        UpperCAmelCase : Optional[int] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''' )
        UpperCAmelCase : Optional[int] = xmod_layer.fca.weight
        UpperCAmelCase : Tuple = xmod_layer.fca.bias
        # output
        UpperCAmelCase : List[str] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
        UpperCAmelCase : Optional[Any] = xmod_layer.fca.weight
        UpperCAmelCase : int = xmod_layer.fca.bias
        UpperCAmelCase : str = xmod_layer.final_layer_norm.weight
        UpperCAmelCase : List[Any] = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            UpperCAmelCase : Dict = xmod_layer.adapter_layer_norm.weight
            UpperCAmelCase : List[Any] = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('''Lists of language adapters do not match.''' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            UpperCAmelCase : Dict = bert_output.adapter_modules[lang_code]
            UpperCAmelCase : Union[str, Any] = xmod_layer.adapter_modules[lang_code]
            UpperCAmelCase : Tuple = from_adapter.fca.weight
            UpperCAmelCase : str = from_adapter.fca.bias
            UpperCAmelCase : Any = from_adapter.fca.weight
            UpperCAmelCase : Any = from_adapter.fca.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        UpperCAmelCase : str = xmod_sent_encoder.layer_norm.weight
        UpperCAmelCase : Optional[int] = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        UpperCAmelCase : Dict = xmod.model.classification_heads['''mnli'''].dense.weight
        UpperCAmelCase : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
        UpperCAmelCase : Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
        UpperCAmelCase : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        UpperCAmelCase : int = xmod.model.encoder.lm_head.dense.weight
        UpperCAmelCase : Any = xmod.model.encoder.lm_head.dense.bias
        UpperCAmelCase : str = xmod.model.encoder.lm_head.layer_norm.weight
        UpperCAmelCase : List[str] = xmod.model.encoder.lm_head.layer_norm.bias
        UpperCAmelCase : Union[str, Any] = xmod.model.encoder.lm_head.weight
        UpperCAmelCase : str = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    UpperCAmelCase : Any = xmod.encode(__A ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(__A )
    UpperCAmelCase : Any = model(__A )[0]
    if classification_head:
        UpperCAmelCase : Optional[int] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(__A ) )
    else:
        UpperCAmelCase : Optional[Any] = xmod.model(__A , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    UpperCAmelCase : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    UpperCAmelCase : str = torch.allclose(__A , __A , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(__A ).mkdir(parents=__A , exist_ok=__A )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__A )
if __name__ == "__main__":
    # CLI entry point for the X-MOD checkpoint conversion.
    # NOTE(review): the parser is bound to ``UpperCamelCase__`` but later lines read
    # ``parser`` / ``args``, and ``convert_xmod_checkpoint_to_pytorch`` is not defined
    # in this file (the conversion function above is named ``snake_case_``) -- the
    # names look mechanically mangled; restore them before running.
    UpperCamelCase__: Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    UpperCamelCase__: Optional[Any] = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 23
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the MBart sub-package: list the public symbols per
# optional backend (sentencepiece / tokenizers / torch / tf / flax).
# NOTE(review): every assignment below rebinds the SAME name ``snake_case_``;
# originally these were presumably entries of an ``_import_structure`` dict
# (e.g. ``_import_structure["modeling_mbart"] = [...]``) -- confirm against the
# upstream transformers ``mbart/__init__.py`` before running.
snake_case_ : Dict = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Tuple = ["MBartTokenizer"]
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : str = ["MBartTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : List[Any] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : Any = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : List[str] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
# Static type checkers get the real imports; at runtime the module is replaced
# by a _LazyModule so heavy backends load on first attribute access.
if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): ``_import_structure`` is never defined above (the assignments
    # bound ``snake_case_`` instead), and upstream this line is
    # ``sys.modules[__name__] = _LazyModule(...)`` rather than a plain assignment
    # -- confirm against the original transformers module before running.
    snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

logger = logging.get_logger(__name__)

# Map each convertible slow tokenizer name to its fast counterpart class,
# e.g. "BertTokenizer" -> transformers.BertTokenizerFast. Used both for
# validation and for looking up the class to instantiate.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to fast ``tokenizer.json`` files.

    Args:
        tokenizer_name: name of a tokenizer class in ``TOKENIZER_CLASSES``, or
            ``None`` to convert every known tokenizer.
        checkpoint_name: a single checkpoint to convert, or ``None`` to convert
            all canonical checkpoints of each tokenizer.
        dump_path: output directory for the generated files.
        force_download: re-download checkpoints even if they are cached.

    Raises:
        ValueError: if ``tokenizer_name`` is given but unknown.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint known for this tokenizer.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast "tokenizer.json"; remove auxiliary slow files.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


# Backward-compatible alias for the original obfuscated name.
SCREAMING_SNAKE_CASE = convert_slow_checkpoint_to_fast
if __name__ == "__main__":
    # Command-line entry point: parse options and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 50
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case(ProcessorMixin):
    """Processor that wraps a FLAVA image processor and a BERT tokenizer into a
    single callable accepting images, text, or both."""

    # ``ProcessorMixin`` contract: the sub-processor attributes and their classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Fall back to the deprecated argument when ``image_processor`` is absent.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize ``text`` and/or preprocess ``images``; merge both results
        into one encoding when both inputs are given.

        Raises:
            ValueError: if neither ``text`` nor ``images`` is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias of ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias of ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 51
| 0
|
from collections.abc import Iterable
from typing import Any
class Node:
    """A binary-search-tree node holding a value and links to its neighbours."""

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    """An (unbalanced) binary search tree keyed on node values."""

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        """Return a printable representation of the whole tree."""
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        # Splice ``new_children`` into ``node``'s position (used by ``remove``).
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """Return True if ``node`` is the right child of its parent."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        """Return True if the tree holds no nodes."""
        return self.root is None

    def __insert(self, value):
        # Create a leaf for ``value`` at its sorted position.
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't reach a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        """Insert each of ``values`` into the tree."""
        for value in values:
            self.__insert(value)

    def search(self, value):
        """Return the node holding ``value``, or None if absent.

        Raises:
            IndexError: if the tree is empty.
        """
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType attribute error;
            # compare by equality (``!=``), not identity: ``is`` only works
            # for values in CPython's small-int cache.
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Return the node with the largest value in the subtree rooted at
        ``node`` (default: the whole tree), or None if the tree is empty."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """Return the node with the smallest value (default: whole tree), or
        None if the tree is empty. NOTE(review): like the original, the walk
        always restarts from the root, so a passed ``node`` is ignored."""
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        """Remove the node holding ``value`` while keeping the BST invariant."""
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                # Replace with the max of the left branch, then delete that max.
                tmp_node = self.get_max(node.left)
                self.remove(tmp_node.value)
                node.value = tmp_node.value  # keep tree structure

    def preorder_traverse(self, node):
        """Yield the nodes of the subtree rooted at ``node`` in preorder."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Traverse the tree with ``traversal_function`` (default: preorder)."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        """Append the subtree's values to ``arr`` in sorted (inorder) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) in the subtree at ``node``."""
        arr = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


# Legacy alias: the obfuscated original bound both classes to this name, with
# the tree class as the final binding.
snake_case__ = BinarySearchTree
def postorder(curr_node: "Node | None") -> "list[Node]":
    """Return the nodes of the subtree rooted at ``curr_node`` in postorder
    (left subtree, right subtree, then the node itself)."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree_demo() -> None:
    """Demonstrate BinarySearchTree operations on a small fixed data set."""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


# Legacy alias: final binding of the obfuscated name in the original module.
lowerCAmelCase_ = binary_search_tree_demo
if __name__ == "__main__":
    import doctest
    # Run this module's embedded doctests verbosely when executed directly.
    doctest.testmod(verbose=True)
| 170
|
from diffusers.utils.testing_utils import require_onnxruntime


# Minimal placeholder test class: the ``require_onnxruntime`` decorator marks
# it to be skipped when the onnxruntime package is not installed.
@require_onnxruntime
class __snake_case :
    pass
| 51
| 0
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# Unpickling resolves classes as ``data_utils.Vocab`` / ``data_utils.Corpus``
# inside modules named "data_utils" / "vocabulary", so register aliases for both.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TensorFlow checkpoint and/or a pre-processed
    corpus pickle into PyTorch artifacts under ``pytorch_dump_folder_path``.

    Args:
        tf_checkpoint_path: optional TF checkpoint to convert ("" to skip).
        transfo_xl_config_file: optional config JSON ("" for default config).
        pytorch_dump_folder_path: output directory.
        transfo_xl_dataset_file: optional corpus pickle to convert ("" to skip).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")

        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


# Backward-compatible alias for the original obfuscated name.
A_ = convert_transfo_xl_checkpoint_to_pytorch
if __name__ == "__main__":
    # Command-line entry point: parse options and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 328
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# Unpickling resolves classes as ``data_utils.Vocab`` / ``data_utils.Corpus``
# inside modules named "data_utils" / "vocabulary", so register aliases for both.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a Transformer-XL TensorFlow checkpoint and/or a pre-processed
    corpus pickle into PyTorch artifacts under ``pytorch_dump_folder_path``.

    Args:
        tf_checkpoint_path: optional TF checkpoint to convert ("" to skip).
        transfo_xl_config_file: optional config JSON ("" for default config).
        pytorch_dump_folder_path: output directory.
        transfo_xl_dataset_file: optional corpus pickle to convert ("" to skip).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")

        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


# Backward-compatible alias for the original obfuscated name.
A = convert_transfo_xl_checkpoint_to_pytorch
if __name__ == "__main__":
    # Command-line entry point: parse options and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 51
| 0
|
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list:
A_ = int(__A )
if n_element < 1:
A_ = ValueError("""a should be a positive number""" )
raise my_error
A_ = [1]
A_ , A_ , A_ = (0, 0, 0)
A_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    # Interactive entry point: ask for n and print the first n Hamming numbers.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    # ``UpperCAmelCase__`` is this module's binding of the Hamming generator.
    hamming_numbers = UpperCAmelCase__(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 162
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
# Number of bits used to encode each image channel (pixel values 0-255).
BITS = 8

snake_case_ = BITS  # legacy alias of the original obfuscated name


def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor with values in [0, 1] and shape (b, c, h, w)
    into bipolar bits in {-1, 1} with shape (b, c * bits, h, w)."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)

    # Per-bit masks 2^(bits-1) ... 2^0, broadcastable over the spatial dims.
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = mask.view(bits, 1, 1)            # 'd -> d 1 1'
    x = x.unsqueeze(2)                      # 'b c h w -> b c 1 h w'

    bits_tensor = ((x & mask) != 0).float() # extract each bit as 0/1
    bits_tensor = bits_tensor.flatten(1, 2) # 'b c d h w -> b (c d) h w'
    bits_tensor = bits_tensor * 2 - 1       # map {0, 1} -> {-1, 1}
    return bits_tensor


def bits_to_decimal(x, bits=BITS):
    """Inverse of ``decimal_to_bits``: convert bipolar bits of shape
    (b, c * bits, h, w) back to an image tensor in [0, 1], shape (b, c, h, w)."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = mask.view(bits, 1, 1)            # 'd -> d 1 1'

    # 'b (c d) h w -> b c d h w' with d = bits
    x = x.reshape(x.shape[0], -1, bits, x.shape[-2], x.shape[-1])
    dec = (x * mask).sum(dim=2)             # weighted sum over the bit axis
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(
    self,
    model_output: "torch.FloatTensor",
    timestep: int,
    sample: "torch.FloatTensor",
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> "Union[DDIMSchedulerOutput, Tuple]":
    """DDIM step that clamps the predicted sample to ±``self.bit_scale``
    (instead of ±1) so it works on bit-encoded latents.

    Raises:
        ValueError: if ``set_timesteps`` has not been called yet.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0" to the bit scale rather than the usual [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: "torch.FloatTensor",
    timestep: int,
    sample: "torch.FloatTensor",
    prediction_type: str = "epsilon",
    generator=None,
    return_dict: bool = True,
) -> "Union[DDPMSchedulerOutput, Tuple]":
    """DDPM step that clamps the predicted sample to ±``self.bit_scale``
    (instead of ±1) so it works on bit-encoded latents.

    Raises:
        ValueError: for a ``prediction_type`` other than "epsilon" or "sample".
    """
    t = timestep

    # Split off the learned variance channels if the model predicts them.
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0" to the bit scale rather than the usual [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise (only for t > 0)
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


# Restore the module's final binding of the obfuscated name ``A``.
A = ddpm_bit_scheduler_step
class __snake_case(DiffusionPipeline):
    """Bit Diffusion pipeline: runs a UNet over bit-encoded latents and decodes
    the result back to images."""

    def __init__(
        self,
        unet: "UNetaDConditionModel",
        scheduler: "Union[DDIMScheduler, DDPMScheduler]",
        bit_scale: "Optional[float]" = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Patch the scheduler with the bit-aware step matching its type.
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: "Optional[int]" = 256,
        width: "Optional[int]" = 256,
        num_inference_steps: "Optional[int]" = 50,
        generator: "Optional[torch.Generator]" = None,
        batch_size: "Optional[int]" = 1,
        output_type: "Optional[str]" = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample ``batch_size`` images of size ``height`` x ``width``.

        Returns an ``ImagePipelineOutput`` (or a plain tuple when
        ``return_dict`` is False).
        """
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        # Encode the Gaussian init into bipolar bits scaled by ``bit_scale``.
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 51
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
    """Helper that builds small Nezha configs/random inputs and checks the
    shapes produced by every Nezha head.

    NOTE(review): an obfuscation pass mangled this file — every ``__init__``
    and ``create_and_check_*`` signature below repeats the parameter name
    ``A`` (duplicate argument names are a SyntaxError) and the bodies read
    the original parameter names (``parent``, ``batch_size``, ...), which
    are therefore undefined. The real names must be restored before this
    tester can run; the code is left byte-identical here.
    """

    def __init__(self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=1_2_8 , A=3_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ):
        lowerCamelCase_ : Dict = parent
        lowerCamelCase_ : str = batch_size
        lowerCamelCase_ : Dict = seq_length
        lowerCamelCase_ : List[str] = is_training
        lowerCamelCase_ : List[Any] = use_input_mask
        lowerCamelCase_ : List[Any] = use_token_type_ids
        lowerCamelCase_ : Tuple = use_labels
        lowerCamelCase_ : Union[str, Any] = vocab_size
        lowerCamelCase_ : Dict = hidden_size
        lowerCamelCase_ : List[Any] = num_hidden_layers
        lowerCamelCase_ : int = num_attention_heads
        lowerCamelCase_ : Tuple = intermediate_size
        lowerCamelCase_ : int = hidden_act
        lowerCamelCase_ : Tuple = hidden_dropout_prob
        lowerCamelCase_ : Tuple = attention_probs_dropout_prob
        lowerCamelCase_ : Optional[int] = max_position_embeddings
        lowerCamelCase_ : Optional[Any] = type_vocab_size
        lowerCamelCase_ : Dict = type_sequence_label_size
        lowerCamelCase_ : Any = initializer_range
        lowerCamelCase_ : int = num_labels
        lowerCamelCase_ : Union[str, Any] = num_choices
        lowerCamelCase_ : str = scope

    # Builds random input ids (+ optional mask/type-ids/labels) and a config.
    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : Optional[Any] = None
        if self.use_input_mask:
            lowerCamelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ : List[Any] = None
        if self.use_token_type_ids:
            lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCamelCase_ : Tuple = None
        lowerCamelCase_ : List[Any] = None
        lowerCamelCase_ : Union[str, Any] = None
        if self.use_labels:
            lowerCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ : str = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Returns a NezhaConfig mirroring the tester attributes above.
    def UpperCAmelCase__ (self ):
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )

    # Variant of prepare_config_and_inputs that also supplies encoder states
    # so the model can be exercised as a decoder with cross-attention.
    def UpperCAmelCase__ (self ):
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) : Tuple = self.prepare_config_and_inputs()
        lowerCamelCase_ : Any = True
        lowerCamelCase_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    # Shape checks for the base NezhaModel forward pass.
    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : Optional[Any] = NezhaModel(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
        lowerCamelCase_ : Dict = model(_snake_case , token_type_ids=_snake_case )
        lowerCamelCase_ : str = model(_snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    # Shape checks for the model used as a decoder with cross-attention.
    def UpperCAmelCase__ (self , A , A , A , A , A , A , A , A , A , ):
        lowerCamelCase_ : List[str] = True
        lowerCamelCase_ : Optional[Any] = NezhaModel(_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : Any = model(
            _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
        lowerCamelCase_ : Any = model(
            _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , )
        lowerCamelCase_ : Union[str, Any] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : Tuple = NezhaForMaskedLM(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : Any = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : Union[str, Any] = NezhaForNextSentencePrediction(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : List[str] = model(
            _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : List[Any] = NezhaForPreTraining(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : List[Any] = model(
            _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , next_sentence_label=_snake_case , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : List[str] = NezhaForQuestionAnswering(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : Union[str, Any] = model(
            _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : List[str] = self.num_labels
        lowerCamelCase_ : str = NezhaForSequenceClassification(_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : List[Any] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : Optional[Any] = self.num_labels
        lowerCamelCase_ : int = NezhaForTokenClassification(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : Optional[int] = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
        lowerCamelCase_ : List[Any] = self.num_choices
        lowerCamelCase_ : Union[str, Any] = NezhaForMultipleChoice(config=_snake_case )
        model.to(_snake_case )
        model.eval()
        lowerCamelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : List[Any] = model(
            _snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Packs prepare_config_and_inputs output into the (config, inputs_dict)
    # pair expected by the common test mixins.
    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) : Optional[Any] = config_and_inputs
        lowerCamelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
    """Standard Nezha model-test suite wiring the tester above into the
    common test mixins.

    NOTE(review): obfuscation damage — the three base classes all appear as
    the placeholder ``_lowercase`` (presumably ModelTesterMixin,
    GenerationTesterMixin and PipelineTesterMixin; confirm against the
    imports), the three class attributes below share the single name
    ``lowerCamelCase`` so later assignments clobber earlier ones, and
    several method signatures repeat the parameter ``A`` (a SyntaxError).
    Code left byte-identical.
    """

    lowerCamelCase : List[str] = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase : str = (
        {
            '''feature-extraction''': NezhaModel,
            '''fill-mask''': NezhaForMaskedLM,
            '''question-answering''': NezhaForQuestionAnswering,
            '''text-classification''': NezhaForSequenceClassification,
            '''token-classification''': NezhaForTokenClassification,
            '''zero-shot''': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase : Union[str, Any] = True

    # Adds dummy labels for pretraining-style model classes when requested.
    def UpperCAmelCase__ (self , A , A , A=False ):
        lowerCamelCase_ : Dict = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
        if return_labels:
            if model_class in get_values(_snake_case ):
                lowerCamelCase_ : str = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case )
                lowerCamelCase_ : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
        return inputs_dict

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : List[Any] = NezhaModelTester(self )
        lowerCamelCase_ : List[Any] = ConfigTester(self , config_class=_snake_case , hidden_size=3_7 )

    def UpperCAmelCase__ (self ):
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_snake_case )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*_snake_case )

    # Decoder path without encoder_hidden_states (cross-attention disabled).
    def UpperCAmelCase__ (self ):
        (
            (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ), (
                lowerCamelCase_
            ),
        ) : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        lowerCamelCase_ : int = None
        self.model_tester.create_and_check_model_as_decoder(
            _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_snake_case )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_snake_case )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*_snake_case )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_snake_case )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_snake_case )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_snake_case )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_snake_case )

    @slow
    def UpperCAmelCase__ (self ):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : Dict = NezhaModel.from_pretrained(_snake_case )
            self.assertIsNotNone(_snake_case )

    # Traces the model with TorchScript, round-trips it through disk and
    # re-runs it on GPU to catch JIT regressions.
    @slow
    @require_torch_gpu
    def UpperCAmelCase__ (self ):
        lowerCamelCase_, lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            lowerCamelCase_ : Tuple = True
            lowerCamelCase_ : Tuple = model_class(config=_snake_case )
            lowerCamelCase_ : List[Any] = self._prepare_for_class(_snake_case , _snake_case )
            lowerCamelCase_ : Optional[int] = torch.jit.trace(
                _snake_case , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(_snake_case , os.path.join(_snake_case , '''bert.pt''' ) )
                lowerCamelCase_ : Dict = torch.jit.load(os.path.join(_snake_case , '''bert.pt''' ) , map_location=_snake_case )
                loaded(inputs_dict['''input_ids'''].to(_snake_case ) , inputs_dict['''attention_mask'''].to(_snake_case ) )
@require_torch
class __lowercase ( unittest.TestCase ):
    """Slow integration tests comparing sijunhe/nezha-cn-base outputs
    against precomputed expected tensor slices.

    NOTE(review): locals here were collapsed onto the placeholder
    ``lowerCamelCase_`` while later lines read the original names
    (``model``, ``output``), which are therefore undefined as written.
    """

    @slow
    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Union[str, Any] = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        lowerCamelCase_ : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase_ : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowerCamelCase_ : int = model(_snake_case , attention_mask=_snake_case )[0]
        lowerCamelCase_ : List[Any] = torch.Size((1, 6, 7_6_8) )
        self.assertEqual(output.shape , _snake_case )
        lowerCamelCase_ : int = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1E-4 ) )

    @slow
    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Optional[int] = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        lowerCamelCase_ : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        lowerCamelCase_ : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowerCamelCase_ : Optional[Any] = model(_snake_case , attention_mask=_snake_case )[0]
        lowerCamelCase_ : Any = torch.Size((1, 6, 2_1_1_2_8) )
        self.assertEqual(output.shape , _snake_case )
        lowerCamelCase_ : Optional[int] = torch.tensor(
            [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1E-4 ) )
| 318
|
# Pinned dependency specifiers keyed by package name; consumed by the
# package's setup machinery to build install_requires/extras lists.
snake_case_ : Dict = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 51
| 0
|
def _A ( _lowercase = 1_00_00_00 ) -> int:
"""simple docstring"""
__UpperCamelCase = limit + 1
__UpperCamelCase = [0] * limit
for first_term in range(1 , __A ):
for n in range(__A , __A , __A ):
__UpperCamelCase = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
__UpperCamelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
    # The solver above is named `_A`; the previous call referenced the
    # undefined name `solution()` and raised NameError.
    print(f"""{_A() = }""")
| 310
|
from datetime import datetime
import requests
def A (__A : str ) -> bytes:
    """Resolve the direct media URL of an Instagram video/IGTV post via
    downloadgram.net and return the raw video bytes.

    Fixes: the previous body read the undefined module-level name ``url``
    instead of the ``__A`` parameter, raising NameError.

    :param __A: public URL of the Instagram video/IGTV post
    :return: raw bytes of the downloaded video
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # First request returns JSON metadata; the first source entry holds
    # the direct video URL, which the second request downloads.
    video_url = requests.get(base_url + __A ).json()[0]["urls"][0]["src"]
    return requests.get(video_url ).content
if __name__ == "__main__":
snake_case_ : Optional[Any] = input("Enter Video/IGTV url: ").strip()
snake_case_ : Any = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f"Done. Video saved to disk as {file_name}.")
| 51
| 0
|
"""simple docstring"""
class RadixNode:
    """Node of a radix (compressed prefix) tree over strings.

    Fixes: the class and all of its methods had been renamed to colliding
    placeholder identifiers and the locals collapsed onto a single name,
    so the in-file callers (``RadixNode``, ``insert_many``, ``find``,
    ``delete``, ``print_tree``) raised NameError. Real names restored.
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first letter of each outgoing edge to the child.
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of *words* into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert *word* into the subtree rooted at this node."""
        # Case 1: the word equals this node's prefix -> mark it as stored.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if *word* is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove *word* from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one dash of indent per tree level."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    """Exercise insert/find/delete on a small word set; True on success.

    Fixes: this and two sibling functions all shared one placeholder name
    (mutual shadowing) and the body read the undefined name ``__A``; the
    original name and locals are restored.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests() -> None:
    """Assertion entry point so the module's self-test runs under pytest.

    Fix: restored the original name; the body's call target ``test_trie``
    did not exist under the obfuscated sibling names.
    """
    assert test_trie()
def main() -> None:
    """Build a demo radix tree and pretty-print it.

    Fix: restored the original name (``main`` is what the script guard
    calls) and the locals that previously read the undefined ``__A``.
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
# Run the demo when executed as a script (requires `main` to be defined above).
if __name__ == "__main__":
    main()
| 241
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
snake_case_ : Any = logging.get_logger(__name__)
# Hub locations of the pretrained Falcon configuration files.
snake_case_ : Optional[Any] = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __snake_case ( PretrainedConfig ):
    """Configuration class for Falcon models.

    Fixes: the base class was the undefined placeholder ``a`` (the file
    imports PretrainedConfig for this purpose); the two class attributes
    were both assigned to one name so the first value was lost; every
    ``__init__`` parameter was named ``_snake_case`` (duplicate argument
    names are a SyntaxError) while the body read the original names; and
    the two properties shared one name so ``head_dim`` was clobbered.
    """

    model_type = '''falcon'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.0_2,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        """Store the Falcon hyper-parameters; see class docstring."""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Default to multi-head attention when no KV-head count is given.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Dimension of a single attention head."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Rotary position embeddings are used whenever ALiBi is disabled."""
        return not self.alibi
| 51
| 0
|
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return *price* increased by *tax_rate* (e.g. 0.25 adds 25% tax).

    Fixes: both parameters were named ``__A`` (duplicate argument names
    are a SyntaxError) and the ``__main__`` block calls
    ``price_plus_tax``, which did not exist under the obfuscated name.
    """
    return price * (1 + tax_rate)
# Demo: show the tax helper applied to two sample prices.
if __name__ == "__main__":
    print(F"""{price_plus_tax(100, 0.25) = }""")
    print(F"""{price_plus_tax(125.50, 0.05) = }""")
| 211
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# 1 for manhattan, 0 for euclidean
# Fixes: these four module constants were all bound to one placeholder
# name, leaving the names the classes below read (HEURISTIC, grid,
# delta, TPosition) undefined.
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """A* search node holding position, accumulated cost and heuristic.

    Fixes: the class name collided with two sibling classes and every
    ``__init__`` parameter was named ``_snake_case`` (duplicate argument
    names are a SyntaxError) while the body read the original names.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are stored (row, col)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Orders nodes by total estimated cost so open lists can be sorted.
        return self.f_cost < other.f_cost
class AStar:
    """Single-direction A* over the module-level ``grid``.

    Fixes: class/method names restored (the obfuscated names collided and
    the in-file callers use ``AStar``/``search``/``get_successors``/
    ``retrace_path``); ``__init__`` had duplicate ``_snake_case``
    parameters (a SyntaxError) and locals were collapsed onto one name.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Node coordinates are (x, y) internally; positions are (y, x).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path to the target, or [start] if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (lowest f_cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return walkable neighbour nodes of *parent* per the ``delta`` moves."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            # Skip moves that leave the grid or land on an obstacle.
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the start and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Two A* searches (start→goal and goal→start) that meet in the middle.

    Fixes: class/method names restored (the script below constructs
    ``BidirectionalAStar``); ``__init__`` had duplicate ``_snake_case``
    parameters (a SyntaxError) and locals were collapsed onto one name.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate expansions of both searches until their frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each half re-targets the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(
        self, fwd_node: Node, bwd_node: Node
    ) -> list[TPosition]:
        """Join the two half-paths, dropping the duplicated meeting cell."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Fixes: all script variables were previously bound to one placeholder
    # name, so `AStar(init, goal)` read undefined names.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 51
| 0
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class a__ ( unittest.TestCase ):
    """Slow integration test for the Flax mT5 small checkpoint.

    NOTE(review): every local in the test body is assigned to the single
    placeholder ``SCREAMING_SNAKE_CASE_`` while later lines read the
    original names (``model``, ``tokenizer``, ``logits``, ``labels``,
    ``mtf_score``, ``EXPECTED_SCORE``), which are therefore undefined as
    written — distinct local names must be restored before this can run.
    """

    @slow
    def __UpperCamelCase ( self : List[str] ):
        """Check google/mt5-small reproduces a known cross-entropy score."""
        SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE_ : List[Any] = AutoTokenizer.from_pretrained("google/mt5-small" )
        SCREAMING_SNAKE_CASE_ : Dict = tokenizer("Hello there",return_tensors="np" ).input_ids
        SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer("Hi I am",return_tensors="np" ).input_ids
        SCREAMING_SNAKE_CASE_ : Any = shift_tokens_right(_snake_case,model.config.pad_token_id,model.config.decoder_start_token_id )
        SCREAMING_SNAKE_CASE_ : Tuple = model(_snake_case,decoder_input_ids=_snake_case ).logits
        SCREAMING_SNAKE_CASE_ : Dict = optax.softmax_cross_entropy(_snake_case,onehot(_snake_case,logits.shape[-1] ) ).mean()
        SCREAMING_SNAKE_CASE_ : Optional[Any] = -(labels.shape[-1] * loss.item())
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 18
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds tiny MaskFormer configs and random inputs, and runs shape checks for the tests below.

    Fixed: every parameter was named `_snake_case` (a SyntaxError), all methods shared the
    name `lowerCamelCase` (shadowing each other), and the class name no longer matched the
    `MaskFormerModelTester` reference used by the test class. Names are restored from the
    call sites in this file.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, pixel_mask, mask_labels, class_labels) with random contents."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_channels, self.min_size, self.max_size]
        ).to(torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        # Random binary masks / labels; thresholding at 0.5 gives roughly balanced classes.
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """Return a deliberately tiny MaskFormer config (1-layer Swin backbone, small DETR decoder)."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the shared model tests expect."""
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        """Check the three hidden-state tuples have the depths implied by the config."""
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=output_hidden_states)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class __snake_case ( a , a , unittest.TestCase ):
    """Shared-API test-suite for MaskFormerModel / MaskFormerForInstanceSegmentation.

    NOTE(review): the two mixin bases are the undefined name `a` (presumably
    ModelTesterMixin and PipelineTesterMixin, imported above), and several
    argument positions below read the never-bound name `_snake_case`; these are
    artifacts of an automated rename — the suite cannot run until the original
    names are restored. Code is left byte-identical here.
    """

    # Model classes / pipeline mapping under test (empty when torch is absent).
    UpperCAmelCase__ : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    UpperCAmelCase__ : Optional[Any] = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    # Feature switches read by the shared test mixins — all disabled here.
    UpperCAmelCase__ : Dict = False
    UpperCAmelCase__ : List[str] = False
    UpperCAmelCase__ : Optional[Any] = False
    UpperCAmelCase__ : Union[str, Any] = False
    def lowerCamelCase ( self : List[Any]):
        """Create the model tester and the config tester used by the shared tests."""
        UpperCAmelCase_ = MaskFormerModelTester(self)
        UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)
    def lowerCamelCase ( self : List[Any]):
        """Run the generic config round-trip checks."""
        self.config_tester.run_common_tests()
    def lowerCamelCase ( self : str):
        """Smoke-test the base model forward pass (with hidden states)."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case)
    def lowerCamelCase ( self : str):
        """Smoke-test the instance-segmentation head (logit shapes and loss)."""
        UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_snake_case)
    @unittest.skip(reason='''MaskFormer does not use inputs_embeds''')
    def lowerCamelCase ( self : Dict):
        """Skipped: architecture has no inputs_embeds path."""
        pass
    @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''')
    def lowerCamelCase ( self : int):
        """Skipped: no get_input_embeddings on this model."""
        pass
    @unittest.skip(reason='''MaskFormer is not a generative model''')
    def lowerCamelCase ( self : str):
        """Skipped: generation tests do not apply."""
        pass
    @unittest.skip(reason='''MaskFormer does not use token embeddings''')
    def lowerCamelCase ( self : int):
        """Skipped: token-embedding resize tests do not apply."""
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def lowerCamelCase ( self : Any):
        """Skipped: incompatible with nn.DataParallel."""
        pass
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def lowerCamelCase ( self : str):
        """Skipped pending a smaller common-test model."""
        pass
    def lowerCamelCase ( self : List[str]):
        """Check the forward signature starts with `pixel_values`."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(_snake_case)
            UpperCAmelCase_ = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ = [*signature.parameters.keys()]
            UpperCAmelCase_ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _snake_case)
    @slow
    def lowerCamelCase ( self : Union[str, Any]):
        """Load a published checkpoint to verify from_pretrained works."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            UpperCAmelCase_ = MaskFormerModel.from_pretrained(_snake_case)
            self.assertIsNotNone(_snake_case)
    def lowerCamelCase ( self : str):
        """A forward pass with labels must produce a loss."""
        UpperCAmelCase_ = (self.model_tester.min_size,) * 2
        UpperCAmelCase_ = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=_snake_case),
            '''mask_labels''': torch.randn((2, 10, *size) , device=_snake_case),
            '''class_labels''': torch.zeros(2 , 10 , device=_snake_case).long(),
        }
        UpperCAmelCase_ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(_snake_case)
        UpperCAmelCase_ = model(**_snake_case)
        self.assertTrue(outputs.loss is not None)
    def lowerCamelCase ( self : Optional[Any]):
        """Base-model forward pass requesting hidden states."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(_snake_case , **_snake_case , output_hidden_states=_snake_case)
    def lowerCamelCase ( self : str):
        """Attentions must be returned when output_attentions is requested."""
        UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ = model_class(_snake_case).to(_snake_case)
            UpperCAmelCase_ = model(**_snake_case , output_attentions=_snake_case)
            self.assertTrue(outputs.attentions is not None)
    def lowerCamelCase ( self : int):
        """Training step: loss must backpropagate without error."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        UpperCAmelCase_ = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        UpperCAmelCase_ = model_class(_snake_case)
        model.to(_snake_case)
        model.train()
        UpperCAmelCase_ = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case).loss
        loss.backward()
    def lowerCamelCase ( self : Optional[int]):
        """Gradients must flow to encoder / pixel-decoder / transformer-decoder hidden states and attentions."""
        UpperCAmelCase_ = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        UpperCAmelCase_ = True
        UpperCAmelCase_ = True
        UpperCAmelCase_ = model_class(_snake_case)
        model.to(_snake_case)
        model.train()
        UpperCAmelCase_ = model(_snake_case , mask_labels=_snake_case , class_labels=_snake_case)
        UpperCAmelCase_ = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        UpperCAmelCase_ = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=_snake_case)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
# Absolute tolerance used by the slow integration tests when comparing tensors.
# (Fixed: the former `: Dict` annotation referenced an unimported typing name,
# which raises NameError when evaluated at module level.)
snake_case_ = 1e-4


def prepare_img():
    """Return the standard COCO fixture image (two cats) used by the integration tests.

    Fixed: the function was renamed to `A` while its call sites still use
    `prepare_img`, and it returned the undefined name `image`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
    """Slow integration tests comparing MaskFormer checkpoints against recorded logits.

    NOTE(review): every `_snake_case` read below (devices, atol values, call
    arguments) is an unbound name left by an automated rename; the tests cannot
    run as-is. Code is left byte-identical here.
    """

    @cached_property
    def lowerCamelCase ( self : List[str]):
        """Return the image processor for the small Swin COCO checkpoint (or None without vision deps)."""
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
            if is_vision_available()
            else None
        )
    def lowerCamelCase ( self : List[Any]):
        """Base model: encoder / pixel-decoder / transformer-decoder logits match recorded values."""
        UpperCAmelCase_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(_snake_case)
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        UpperCAmelCase_ = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        UpperCAmelCase_ = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        UpperCAmelCase_ = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _snake_case , atol=_snake_case))
    def lowerCamelCase ( self : List[str]):
        """Instance-segmentation head (Swin-small COCO): mask and class logits match recorded values."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        # masks_queries_logits
        UpperCAmelCase_ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCAmelCase_ = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # class_queries_logits
        UpperCAmelCase_ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        UpperCAmelCase_ = torch.tensor(
            [
                [1.65_12e00, -5.25_72e00, -3.35_19e00],
                [3.61_69e-02, -5.90_25e00, -2.93_13e00],
                [1.07_66e-04, -7.76_30e00, -5.12_63e00],
            ]).to(_snake_case)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))
    def lowerCamelCase ( self : Optional[Any]):
        """Instance-segmentation head (ResNet-101 COCO-stuff): logits match recorded values."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        # masks_queries_logits
        UpperCAmelCase_ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCAmelCase_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # class_queries_logits
        UpperCAmelCase_ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        UpperCAmelCase_ = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]).to(_snake_case)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))
    def lowerCamelCase ( self : Tuple):
        """Batched inputs with segmentation maps: forward pass must yield a loss."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='''pt''' , )
        UpperCAmelCase_ = inputs['''pixel_values'''].to(_snake_case)
        UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''mask_labels''']]
        UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''class_labels''']]
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        self.assertTrue(outputs.loss is not None)
| 51
| 0
|
"""simple docstring"""
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
if a < 0:
raise ValueError("""Input value must be a positive integer""" )
elif isinstance(__A , __A ):
raise TypeError("""Input value must be a \'int\' type""" )
return bin(__A ).count("""1""" )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 84
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy a converted weight (and optional bias) tensor into a torch layer, asserting matching shapes.

    Fixed: the def was renamed to `A` with three parameters all named `__A`
    (a SyntaxError), while every call site in this file uses `set_param`.
    """
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH-attention weights (shared query_key, value, output dense) into a torch attention layer.

    Fixed: def renamed to `A` with duplicate `__A` parameters (SyntaxError);
    parameter names restored from the call site in set_block_weights_in_torch.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    # trax stores per-head projections; flatten (heads, head_dim) into hidden_size.
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local-attention weights (query, key, value, output dense) into a torch attention layer.

    Fixed: def renamed to `A` with duplicate `__A` parameters (SyntaxError);
    parameter names restored from the call site in set_block_weights_in_torch.
    """
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention layer-norm, attention, feed-forward) into a torch block.

    Fixed: def renamed to `A` with duplicate `__A` parameters (SyntaxError),
    and all locals collapsed onto one rebound name; restored from the names
    the body actually reads.
    """
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0])
    layer_norm_a_bias = np.asarray(layer_norm_a[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_a_weight),
        torch.tensor(layer_norm_a_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention stores 3 arrays (shared query_key); local attention stores 4.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_b_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_b_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_b_weight),
        torch.tensor(layer_norm_b_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load all trax Reformer weights (embeddings, encoder blocks, final norm, LM head) into the torch model.

    Fixed: def renamed to `A` with duplicate `__A` parameters (SyntaxError);
    names restored from the call site in convert_trax_checkpoint_to_pytorch.
    """
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # Axial position embeddings arrive as a tuple of per-axis weight arrays.
    # NOTE(review): the scrambled source had `isinstance(weights[3], __A)`;
    # `tuple` matches how the per-axis weights are indexed below — confirm
    # against the upstream conversion script.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # Each torch block consumes 4 consecutive trax weight groups.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    out_norm_weight = np.asarray(weights[7][0])
    out_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(out_norm_weight),
        torch.tensor(out_norm_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from `config_file`, load trax weights from the pickle, and save it.

    Fixed: def renamed to `A` with duplicate `__A` parameters (SyntaxError),
    while the __main__ block calls `convert_trax_checkpoint_to_pytorch`.
    """
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    # SECURITY NOTE: pickle.load executes arbitrary code — only run on
    # checkpoints from a trusted source.
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: convert a trax Reformer pickle checkpoint to a PyTorch
    # state dict given its config JSON.
    snake_case_ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    # NOTE(review): the parser is bound to `snake_case_` above but referenced
    # as `parser` here — a renaming artifact; restore one consistent name.
    snake_case_ : List[Any] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 51
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build DonutImageProcessor instances in the tests below.

    Fixed: the class was renamed away from `DonutImageProcessingTester` (which the
    test class below still instantiates), every __init__ parameter was named
    `__snake_case` (a SyntaxError), the dict method was renamed away from
    `prepare_image_processor_dict` (still called below), and the mean/std
    defaults were mutable lists.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        # Avoid mutable default arguments: fall back to the 0.5 mean/std here.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate a DonutImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for DonutImageProcessor: property plumbing plus PIL / numpy / torch call paths.

    Fixed: the mixin base was the undefined alias `A__` (the intended
    ImageProcessingSavingTestMixin is imported at the top of the file), and every
    method was named `A`, so all but the last were destroyed and none ran under
    unittest discovery. Method names restored from the references in their bodies.
    """

    lowerCamelCase__ = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes every expected configuration attribute."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        """`size` overrides are normalised, including legacy (width, height) tuples."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def A(self):
        # Intentionally empty in the source; the original (pre-rename) method
        # name was lost — TODO restore it from upstream before relying on it.
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 23
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __snake_case ( a , a , a , unittest.TestCase ):
    """Fast tests for the single-ControlNet img2img pipeline using tiny, randomly
    initialized components (CPU-friendly, deterministic via fixed seeds)."""

    # Names below are read by the pipeline tester mixins.
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a full set of tiny pipeline components, reseeding before each model
        so every component is deterministic in isolation."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs (input image + control image) for `device`/`seed`."""
        if str(device).startswith('''mps'''):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        return {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __snake_case ( a , a , unittest.TestCase ):
    """Fast tests for the img2img pipeline driven by a MultiControlNetModel
    holding two independently weighted ControlNets."""

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Tiny deterministic components; the two ControlNets get distinct weights so
        that per-net guidance schedules produce distinguishable outputs."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Re-initialize conv layers so the two ControlNets are not identical.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        return {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; one control image per ControlNet."""
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        return {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end schedules must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow end-to-end test: canny-conditioned img2img against a reference image."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'''
        ).resize((512, 512))
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png'''
        ).resize((512, 512))
        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type='''np''',
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy'''
        )
        # Loose tolerance: results vary slightly across hardware/driver versions.
        assert np.abs(expected_image - image).max() < 9e-2
| 51
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
# Module-level tokenizer resources; the tokenizer class below reads these names.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 20_48,
}
# Backward-compat: the old mangled name ended up bound to the sizes dict
# (each of the four assignments above originally overwrote `_UpperCAmelCase`).
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class lowerCAmelCase ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) tokenizer for GPT-NeoX-20B."""

    # Attribute names below are the hooks PreTrainedTokenizerFast reads.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        # Re-instantiate the backend pre-tokenizer if its stored add_prefix_space
        # flag does not match the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into token ids (eos between turns), keeping at most
        the trailing model_max_length tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 50
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory for benchmark callables: run the wrapped function either
    eagerly or as a (optionally XLA-compiled) `tf.function`.

    Raises:
        ValueError: if XLA is requested together with eager mode.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.'''
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Random token-id tensor of shape (batch_size, sequence_length) with values in
    [0, vocab_size)."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class __snake_case ( a ):
    """TensorFlow implementation of the benchmarking harness: prepares inference/
    training closures for a model config and measures their speed and memory."""

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        """Version string of the active TensorFlow install."""
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # A distribution strategy must be initialized before any TF graph work.
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            # Avoid TF grabbing all GPU memory up front so `nvidia-smi` readings are meaningful.
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        """Instantiate the model for `model_name` and return a zero-arg forward-pass closure."""
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        has_model_class_in_config = (
            hasattr(config, '''architectures''')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('''transformers''', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, '''vocab_size''') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        """Instantiate the LM-head model and return a zero-arg forward+backward closure."""
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
        if self.args.fp16:
            raise NotImplementedError('''Mixed precision is currently not supported.''')
        has_model_class_in_config = (
            hasattr(config, '''architectures''')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('''transformers''', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, '''vocab_size''') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Return the per-call runtime (seconds) of `func`, min over `args.repeat` batches of 10."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""")

    def _measure_memory(self, func: Callable[[], None]):
        """Measure peak memory of one call to `func`; returns (memory, trace summary)."""
        logger.info(
            '''Note that TensorFlow allocates more memory than '''
            '''it might need to speed up computation. '''
            '''The memory reported here corresponds to the memory '''
            '''reported by `nvidia-smi`, which can vary depending '''
            '''on total available memory on the GPU that is used.''')
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
                            ''' consumption line by line.''')
                    trace = start_memory_tracing('''transformers''')
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
                        ''' with `args.memory=False`''')
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            '''py3nvml not installed, we won\'t log GPU memory usage. '''
                            '''Install py3nvml (pip install py3nvml) to log information about GPU.''')
                        memory = '''N/A'''
                    else:
                        logger.info(
                            '''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
                            ''' running on the same GPU.''')
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
                            ''' TensorFlow.''')
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""")
                return "N/A", None
| 51
| 0
|
from __future__ import annotations
class snake_case__:
    """Singly linked-list node."""

    def __init__(self, data=None):
        # Payload and pointer to the next node; None terminates the chain.
        self.data = data
        self.next = None

    def __repr__(self):
        """Render the chain starting at this node as 'a->b->c'."""
        parts = []
        node = self
        while node:
            parts.append(F'''{node.data}''')
            node = node.next
        return "->".join(parts)


def make_linked_list(elements_list: list):
    """Build a linked list from a non-empty Python list and return its head node.

    Raises:
        Exception: if `elements_list` is empty.
    """
    if not elements_list:
        raise Exception("""The Elements List is empty""")
    head = snake_case__(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = snake_case__(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node) -> None:
    """Print node payloads in reverse order via recursion."""
    if head_node is not None and isinstance(head_node, snake_case__):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    """Demo: build a list, print it forwards and then in reverse."""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("""Linked List:""")
    print(linked_list)
    print("""Elements in Reverse:""")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 170
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Minimal stand-in so references to `Image` resolve when Pillow is absent.

        Vision-requiring tests are skipped in that case, so it never needs to work.
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: "Image") -> str:
    """Return the MD5 hex digest of the image's raw bytes.

    Used as a cheap, stable fingerprint for comparing pipeline output images.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase ):
    """Pipeline tests for depth estimation."""

    # Read by the shared pipeline-test machinery.
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, outputs)
        import datasets

        # The pipeline must accept PIL images, URLs and files in RGBA / LA / L modes.
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')
        outputs = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ])
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''')
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''', model=model_id)
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        outputs['''depth'''] = hashimage(outputs['''depth'''])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 51
| 0
|
def A_ ( snake_case : int = 1000 ) -> int:
    '''Project Euler 57: count how many of the first `snake_case` expansions of the
    continued fraction for sqrt(2) have a numerator with more digits than the
    denominator.

    Each step maps (num, den) -> (num + 2*den, num + den), starting from (1, 1).
    '''
    numerator, denominator = 1, 1
    count = 0
    for _ in range(1, snake_case + 1):
        numerator, denominator = numerator + 2 * denominator, numerator + denominator
        if len(str(numerator)) > len(str(denominator)):
            count += 1
    return count


if __name__ == "__main__":
    print(F"{A_() = }")
| 328
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodules are only imported when their names are accessed.
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    # Fall back to the conventional cache location when torch is absent.
    torch_cache_home = os.path.expanduser(
        os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
    )
default_cache_path = os.path.join(torch_cache_home, '''transformers''')
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
# Directory containing this file; label/config resources live next to it.
PATH = "/".join(str(Path(__file__).resolve()).split('''/''')[:-1])
CONFIG = os.path.join(PATH, '''config.yaml''')
ATTRIBUTES = os.path.join(PATH, '''attributes.txt''')
OBJECTS = os.path.join(PATH, '''objects.txt''')
# Cache dirs: each env var falls back to the previous one.
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
# Backward-compat: the old mangled name ended up bound to the last value above.
__lowerCamelCase = CONFIG_NAME
def UpperCAmelCase__ ( UpperCAmelCase__=OBJECTS, UpperCAmelCase__=ATTRIBUTES ) -> Any:
A_ = []
with open(__A ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
A_ = []
with open(__A ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def UpperCAmelCase__(ckp_path) -> "OrderedDict":
    """Load a pickled Detectron-style checkpoint and convert weights to torch tensors.

    :param ckp_path: path to a pickle file whose top-level dict has a ``"model"`` key
        mapping parameter names to numpy arrays (or already-made torch tensors).
    :return: OrderedDict of parameter name -> torch.Tensor, in original key order.
    """
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    # Pop keys while iterating over a snapshot of them, so the source dict is drained.
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # BUG fix: the original checked isinstance(v, torch.tensor) — torch.tensor
            # is a factory *function*; the tensor *type* is torch.Tensor.
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class A__ :
    # Attribute-style wrapper over a nested configuration dictionary (dot-path
    # get/set, YAML/JSON persistence, and loading from a local path or remote hub).
    # NOTE(review): this class is machine-obfuscated and currently broken —
    # several methods declare multiple parameters with the SAME name (a
    # SyntaxError) while their bodies reference undefined placeholders
    # (`_snake_case`, `A_` rebound repeatedly). The original identifiers must be
    # restored from the upstream source before this class can run; the
    # documentation below describes the apparent intent only.
    lowercase = {}
    def __init__( self , UpperCamelCase__ , UpperCamelCase__ = "root" , UpperCamelCase__=0 ) -> int:
        # Recursively wrap `dictionary`: dict values become nested Config objects
        # (one level deeper); every key is also exposed as an attribute.
        # NOTE(review): duplicate parameter names — original was (dictionary, name="root", level=0).
        A_ = name
        A_ = level
        A_ = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            A_ = copy.deepcopy(_snake_case )
            A_ = copy.deepcopy(_snake_case )
            if isinstance(_snake_case , _snake_case ):
                A_ = Config(_snake_case , name=_snake_case , level=level + 1 )
            A_ = v
            setattr(self , _snake_case , _snake_case )
        A_ = d
    def __repr__( self ) -> List[Any]:
        # Show the top-level keys of the wrapped dictionary.
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
        # Dotted keys ("a.b.c") are routed to the nested Config they address.
        # NOTE(review): duplicate parameter names — original was (key, val).
        A_ = val
        A_ = val
        A_ = key.split(""".""" )
        A_ = len(_snake_case ) - 1
        A_ = self._pointer
        if len(_snake_case ) > 1:
            for i, l in enumerate(_snake_case ):
                if hasattr(self , _snake_case ) and isinstance(getattr(self , _snake_case ) , _snake_case ):
                    setattr(getattr(self , _snake_case ) , """.""".join(levels[i:] ) , _snake_case )
                if l == last_level:
                    A_ = val
                else:
                    A_ = pointer[l]
    def snake_case_ ( self ) -> str:
        # Return the raw underlying dictionary.
        return self._pointer
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
        # Serialize the given data to a YAML file.
        with open(f'''{file_name}''' , """w""" ) as stream:
            dump(_snake_case , _snake_case )
    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
        # Serialize the given data to a JSON file.
        with open(f'''{file_name}''' , """w""" ) as stream:
            json.dump(_snake_case , _snake_case )
    @staticmethod
    def snake_case_ ( UpperCamelCase__ ) -> Any:
        # Load and return data from a YAML file.
        with open(_snake_case ) as stream:
            A_ = load(_snake_case , Loader=_snake_case )
        return data
    def __str__( self ) -> Optional[int]:
        # Pretty-print the nested configuration with one indent level per depth.
        A_ = """ """
        if self._name != "root":
            A_ = f'''{t * (self._level-1)}{self._name}:\n'''
        else:
            A_ = """"""
        A_ = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(_snake_case , _snake_case ):
                r += f'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += f'''{t * (self._level)}{k}: {v} ({type(_snake_case ).__name__})\n'''
            A_ = level
        return r[:-1]
    @classmethod
    def snake_case_ ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
        # Build a Config from a pretrained model name or path (delegates to
        # get_config_dict below).
        A_ , A_ = cls.get_config_dict(_snake_case , **_snake_case )
        return cls(_snake_case )
    @classmethod
    def snake_case_ ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
        # Resolve the config file (local dir/file, remote URL, or hub bucket),
        # optionally downloading through the shared cache, then load it as YAML.
        A_ = kwargs.pop("""cache_dir""" , _snake_case )
        A_ = kwargs.pop("""force_download""" , _snake_case )
        A_ = kwargs.pop("""resume_download""" , _snake_case )
        A_ = kwargs.pop("""proxies""" , _snake_case )
        A_ = kwargs.pop("""local_files_only""" , _snake_case )
        if os.path.isdir(_snake_case ):
            A_ = os.path.join(_snake_case , _snake_case )
        elif os.path.isfile(_snake_case ) or is_remote_url(_snake_case ):
            A_ = pretrained_model_name_or_path
        else:
            A_ = hf_bucket_url(_snake_case , filename=_snake_case , use_cdn=_snake_case )
        try:
            # Load from URL or cache if already cached
            A_ = cached_path(
                _snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            A_ = Config.load_yaml(_snake_case )
        except EnvironmentError:
            A_ = """Can\'t load config for"""
            raise EnvironmentError(_snake_case )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(_snake_case ), kwargs
def UpperCAmelCase__(in_tensor) -> int:
    """Debug helper: compare *in_tensor* against the reference batch saved in ``dump.pt``.

    Loads ``dump.pt`` onto the input tensor's device, compares element-wise with
    a loose tolerance, and — deliberately — raises on SUCCESS so a debugging
    session halts with "tensors are all good" when the values match.

    :param in_tensor: torch tensor to check (CPU-accessible, ``.numpy()``-able).
    :raises AssertionError: if the tensors differ beyond rtol=0.01 / atol=0.1.
    :raises Exception: always, when the tensors match (intentional debug stop).
    """
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    na = in_tensor.numpy()
    # The dump holds a batch; compare against its first element.
    nb = out_tensor.numpy()[0]
    print(na.shape, na[0, 0, :5])
    print(nb.shape, nb[0, 0, :5])
    assert np.allclose(na, nb, rtol=0.01, atol=0.1), (
        # BUG fix: the original counted `x is False` over numpy bools, which is
        # never true, so the mismatch percentage always printed 0.
        f"{sum(1 for x in np.isclose(na, nb, rtol=0.01, atol=0.1).flatten() if not x) / len(na.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("""tensors are all good""" )
# Hugging face functions below
def UpperCAmelCase__(url_or_filename) -> bool:
    """Return True if *url_or_filename* is an http(s) URL rather than a local path."""
    # BUG fix: the body referenced the undefined placeholder `__A`; it must
    # parse the function's own argument. Annotation corrected: this returns bool.
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def UpperCAmelCase__(model_id, filename, use_cdn=True) -> str:
    """Build the download URL for *filename* of hub model *model_id*.

    Legacy model ids (no "/") use ``{endpoint}/{model_id}-{filename}``;
    namespaced ids use ``{endpoint}/{model_id}/{filename}``.

    :param model_id: model identifier, optionally ``org/name``.
    :param filename: file to fetch (e.g. weights or config name).
    :param use_cdn: pick the CDN endpoint instead of the raw S3 bucket.
    """
    # BUG fix: the original signature declared three parameters with the same
    # name (a SyntaxError) and hard-coded "(unknown)" where the `filename`
    # argument belongs, leaving the parameter unused.
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def UpperCAmelCase__(url, temp_file, proxies=None, resume_size=0, user_agent=None) -> Any:
    """Stream *url* into the open binary file object *temp_file*, with a progress bar.

    :param url: URL to download.
    :param temp_file: writable binary file object receiving the content.
    :param proxies: optional requests proxy mapping.
    :param resume_size: byte offset to resume a partial download from.
    :param user_agent: extra user-agent info (str, or dict of key/value pairs).
    """
    # BUG fix: the original signature declared five parameters with the same
    # name (a SyntaxError), and the Range header was assigned to a throwaway
    # local instead of the headers dict, so resuming never worked.
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable: nothing left to fetch
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
# Download-through-cache resolver: returns the local cache path for a URL,
# downloading (with optional resume) under a file lock when needed.
# NOTE(review): machine-obfuscated and currently broken — all eight parameters
# share one name (a SyntaxError) and the body references undefined
# placeholders (`__A`, repeated `A_` rebinds). Original parameters appear to
# have been (url, cache_dir=None, force_download=False, proxies=None,
# etag_timeout=10, resume_download=False, user_agent=None,
# local_files_only=False) — restore from upstream before use.
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__=False, UpperCAmelCase__=None, UpperCAmelCase__=10, UpperCAmelCase__=False, UpperCAmelCase__=None, UpperCAmelCase__=False, ) -> Any:
    if cache_dir is None:
        A_ = TRANSFORMERS_CACHE
    if isinstance(__A, __A ):
        A_ = str(__A )
    os.makedirs(__A, exist_ok=__A )
    A_ = None
    if not local_files_only:
        # Probe the URL for its ETag; failures leave etag as None (offline mode).
        try:
            A_ = requests.head(__A, allow_redirects=__A, proxies=__A, timeout=__A )
            if response.status_code == 2_00:
                A_ = response.headers.get("""ETag""" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    A_ = url_to_filename(__A, __A )
    # get cache path to put the file
    A_ = os.path.join(__A, __A )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(__A ):
            return cache_path
        else:
            # Fall back to the newest cached variant of this file, if any.
            A_ = [
                file
                for file in fnmatch.filter(os.listdir(__A ), filename + """.*""" )
                if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
            ]
            if len(__A ) > 0:
                return os.path.join(__A, matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        """Cannot find the requested files in the cached path and outgoing traffic has been"""
                        """ disabled. To enable model look-ups and downloads online, set \'local_files_only\'"""
                        """ to False.""" )
                return None
    # From now on, etag is not None.
    if os.path.exists(__A ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    A_ = cache_path + """.lock"""
    with FileLock(__A ):
        # If the download just completed while the lock was activated.
        if os.path.exists(__A ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            # Resume into a ".incomplete" sidecar file, appending to prior bytes.
            A_ = cache_path + """.incomplete"""
            @contextmanager
            def _resumable_file_manager():
                with open(__A, """a+b""" ) as f:
                    yield f
            A_ = _resumable_file_manager
            if os.path.exists(__A ):
                A_ = os.stat(__A ).st_size
            else:
                A_ = 0
        else:
            A_ = partial(tempfile.NamedTemporaryFile, dir=__A, delete=__A )
            A_ = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""", __A, temp_file.name, )
            http_get(
                __A, __A, proxies=__A, resume_size=__A, user_agent=__A, )
        os.replace(temp_file.name, __A )
        # Record url/etag metadata next to the cached file for later lookups.
        A_ = {"""url""": url, """etag""": etag}
        A_ = cache_path + """.json"""
        with open(__A, """w""" ) as meta_file:
            json.dump(__A, __A )
    return cache_path
def UpperCAmelCase__(url, etag=None) -> str:
    """Map a URL (and optional ETag) to a deterministic cache filename.

    The name is ``sha256(url)`` hex, plus ``.sha256(etag)`` when an etag is
    given; a ``.h5`` suffix is preserved so TF weight loaders can detect it.
    """
    # Local import: the module-level `from hashlib import shaaaa` is broken
    # (hashlib exposes no such name); sha256 matches the original intent.
    from hashlib import sha256

    url_bytes = url.encode("utf-8")
    filename = sha256(url_bytes).hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        filename += "." + sha256(etag_bytes).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
# Resolve a URL or local path to a concrete local file, downloading through the
# cache when remote, and optionally extracting zip/tar archives in place.
# NOTE(review): machine-obfuscated and currently broken — all nine parameters
# share one name (a SyntaxError) and the body references the undefined
# placeholder `__A`. Original parameters appear to have been
# (url_or_filename, cache_dir=None, force_download=False, proxies=None,
# resume_download=False, user_agent=None, extract_compressed_file=False,
# force_extract=False, local_files_only=False) — restore from upstream.
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__=False, UpperCAmelCase__=None, UpperCAmelCase__=False, UpperCAmelCase__=None, UpperCAmelCase__=False, UpperCAmelCase__=False, UpperCAmelCase__=False, ) -> Any:
    if cache_dir is None:
        A_ = TRANSFORMERS_CACHE
    if isinstance(__A, __A ):
        A_ = str(__A )
    if isinstance(__A, __A ):
        A_ = str(__A )
    if is_remote_url(__A ):
        # URL, so get it from the cache (downloading if necessary)
        A_ = get_from_cache(
            __A, cache_dir=__A, force_download=__A, proxies=__A, resume_download=__A, user_agent=__A, local_files_only=__A, )
    elif os.path.exists(__A ):
        # File, and it exists.
        A_ = url_or_filename
    elif urlparse(__A ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(__A ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(__A ) )
    if extract_compressed_file:
        if not is_zipfile(__A ) and not tarfile.is_tarfile(__A ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        A_ , A_ = os.path.split(__A )
        A_ = output_file.replace(""".""", """-""" ) + """-extracted"""
        A_ = os.path.join(__A, __A )
        if os.path.isdir(__A ) and os.listdir(__A ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        A_ = output_path + """.lock"""
        with FileLock(__A ):
            # Wipe any stale partial extraction before re-extracting.
            shutil.rmtree(__A, ignore_errors=__A )
            os.makedirs(__A )
            if is_zipfile(__A ):
                with ZipFile(__A, """r""" ) as zip_file:
                    zip_file.extractall(__A )
                    zip_file.close()
            elif tarfile.is_tarfile(__A ):
                A_ = tarfile.open(__A )
                tar_file.extractall(__A )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(__A ) )
        return output_path_extracted
    return output_path
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__="," ) -> Union[str, Any]:
assert isinstance(__A, __A )
if os.path.isfile(__A ):
with open(__A ) as f:
A_ = eval(f.read() )
else:
A_ = requests.get(__A )
try:
A_ = requests.json()
except Exception:
A_ = req.content.decode()
assert data is not None, "could not connect"
try:
A_ = eval(__A )
except Exception:
A_ = data.split("""\n""" )
req.close()
return data
def UpperCAmelCase__(url) -> Any:
    """Fetch an image over HTTP and return it as a numpy array (via PIL)."""
    # BUG fix: the body referenced the undefined placeholder `__A` instead of
    # the function's own `url` argument.
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
A_ = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__A )
with open(__A, """rb""" ) as stream:
A_ = pkl.load(__A )
A_ = weights.pop("""model""" )
A_ = {}
for k, v in model.items():
A_ = torch.from_numpy(__A )
if "running_var" in k:
A_ = torch.tensor([0] )
A_ = k.replace("""running_var""", """num_batches_tracked""" )
A_ = zero
return new
def UpperCAmelCase__() -> Optional[int]:
    """Print the absolute path of the demo notebook next to this package directory."""
    # NOTE(review): the original referenced an undefined placeholder as the base
    # directory; PATH (this file's directory, defined at module level) is the
    # plausible intended base — confirm against upstream.
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def UpperCAmelCase__(im, input_format="RGB") -> int:
    """Load an image from a path or URL and return it as a numpy array.

    :param im: local file path or URL (must be a str).
    :param input_format: "RGB" keeps PIL channel order reversed back after the
        BGR->RGB conversion (matching the original behaviour).
    """
    # BUG fix: the original signature declared two parameters with the same
    # name (a SyntaxError) and the body used undefined placeholders.
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        # NOTE(review): `get_image_from_url` is the intended URL loader defined
        # above in this module — confirm its restored name matches.
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=1 ) -> Union[str, Any]:
return (images[i : i + batch] for i in range(0, len(__A ), __A ))
| 162
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import scaffolding for GPT-NeoX: backends are only imported on first
# attribute access, keeping `import transformers` cheap.
# BUG fix: the obfuscated original rebound one throwaway name for every
# optional-backend list instead of populating `_import_structure`, and never
# installed the lazy module into sys.modules.
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for XLM-RoBERTa (PyTorch/TF/Flax backends plus slow
# and fast tokenizers): each backend is registered only when available.
# BUG fix: the obfuscated original rebound one throwaway name (`__lowercase`)
# for every optional-backend list instead of populating `_import_structure`,
# and never installed the lazy module into sys.modules.
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318
|
def A(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively search *list_data* for *key* by checking both ends and
    shrinking the window inward; return the index, or -1 if absent.

    When ``right`` is 0 (the default) it is treated as "last index"; this also
    means an explicit ``right=0`` is reinterpreted — a quirk kept for
    interface compatibility.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        # BUG fix: the recursive call referenced an undefined name `search`;
        # it must recurse on this function itself.
        return A(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 51
| 0
|
def _A(neighbours: list, colored_vertices: list, color: int) -> bool:
    """Return True if no already-colored neighbour of this vertex uses *color*.

    :param neighbours: adjacency row (1 = neighbour, 0 = not).
    :param colored_vertices: current color per vertex (-1 = uncolored).
    :param color: candidate color for the vertex.
    """
    # BUG fix: the original signature declared three parameters with the same
    # name (a SyntaxError) and the body used the undefined placeholder `__A`.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )
def _A(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    """Backtracking step of graph m-coloring: try every color for vertex *index*.

    Mutates *colored_vertices* in place; returns True once every vertex from
    *index* onward has a valid color.
    """
    # BUG fix: duplicate parameter names (SyntaxError) and an undefined
    # recursive name `util_color` — recursion must target this function itself.
    if index == len(graph):
        return True
    # Recursive Step
    for candidate in range(max_colors):
        # NOTE(review): `valid_coloring` is the neighbour-check helper defined
        # just above in this module — its obfuscated name collides; confirm.
        if valid_coloring(graph[index], colored_vertices, candidate):
            # Color current vertex
            colored_vertices[index] = candidate
            # Validate coloring
            if _A(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def _A(graph: list, max_colors: int) -> list[int]:
    """Solve graph m-coloring; return one valid coloring per vertex, or [] if none."""
    # BUG fix: duplicate parameter names (SyntaxError) and undefined body
    # placeholders restored from the body's usage.
    colored_vertices = [-1] * len(graph)
    # NOTE(review): `util_color` is the backtracking helper defined above —
    # its obfuscated name collides with this function's; confirm.
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 310
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both assignments bind the same throwaway name, shadowing each
# other — originally a module logger and the (empty) pretrained-config
# archive map; restore distinct identifiers from upstream.
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : str = {}
class __snake_case(PretrainedConfig):
    """Configuration for LLaMA models.

    NOTE(review): restored from an obfuscated original whose base class was the
    undefined name `a` (PretrainedConfig is this module's only config import),
    whose __init__ declared all parameters under one name (a SyntaxError), and
    whose validation method's name no longer matched the call site.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        """Build the config; unknown keyword arguments are forwarded to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: must hold a known `type` and a float `factor` > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 51
| 0
|
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
# BUG fix: every constant below was bound to the same throwaway name
# (`lowercase__`), so the names the metric class actually uses — `logger`,
# `_CITATION`, `_DESCRIPTION`, `_KWARGS_DESCRIPTION`, `CHECKPOINT_URLS` —
# were never defined. Restored from the class's own references.
logger = datasets.logging.get_logger(__name__)

_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    'scores': List of scores.\nExamples:\n\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> bleurt = datasets.load_metric(\"bleurt\")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [1.03, 1.04]\n"
# Mapping of checkpoint config names to their downloadable archives.
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    # BLEURT metric wrapper for the `datasets` library: downloads the selected
    # checkpoint and scores prediction/reference sentence pairs.
    # NOTE(review): machine-obfuscated — all three methods share the name
    # `lowerCamelCase` (so only the last survives on the class), and bodies
    # reference undefined placeholders (`_snake_case`, `a_` parameter vs
    # `dl_manager`/`predictions`/`references` usage). Restore the original
    # method names (apparently _info / _download_and_prepare / _compute) and
    # parameter names from upstream before use.
    '''simple docstring'''
    def lowerCamelCase ( self : str ):
        # Describe the metric: string predictions/references in, scores out.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def lowerCamelCase ( self : Optional[Any] , a_ : Dict ):
        # Resolve the checkpoint name from self.config_name, download it and
        # construct the BLEURT scorer. NOTE(review): `a_` appears to be the
        # dl_manager argument the body references — confirm.
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\')." )
            lowerCAmelCase_ : int = "bleurt-base-128"
        if self.config_name.lower() in CHECKPOINT_URLS:
            lowerCAmelCase_ : Union[str, Any] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            lowerCAmelCase_ : int = self.config_name.upper()
        else:
            raise KeyError(
                f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        lowerCAmelCase_ : Dict = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        lowerCAmelCase_ : Optional[Any] = score.BleurtScorer(os.path.join(_snake_case , _snake_case ) )
    def lowerCamelCase ( self : Any , a_ : int , a_ : List[Any] ):
        # Score the given predictions against references with the loaded scorer.
        # NOTE(review): duplicate parameter names — original was (predictions, references).
        lowerCAmelCase_ : Tuple = self.scorer.score(references=_snake_case , candidates=_snake_case )
        return {"scores": scores}
| 241
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# NOTE(review): both assignments bind the same throwaway name, shadowing each
# other — originally a module logger and the pretrained-config archive map;
# restore distinct identifiers from upstream.
snake_case_ : List[str] = logging.get_logger(__name__)
snake_case_ : Tuple = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __snake_case(PretrainedConfig):
    """Configuration class for Salesforce CodeGen checkpoints.

    NOTE(review): restored from an obfuscated original whose base class was the
    undefined name `a` (PretrainedConfig is imported at the top of this module),
    whose two class attributes were both bound to one name (shadowing each
    other — restored to `model_type` / `attribute_map`), and whose __init__
    declared all parameters under one name (a SyntaxError).
    """

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Build the config; unknown keyword arguments are forwarded to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class __snake_case ( a ):
    # ONNX export config for CodeGen (past-key-values aware), extending
    # OnnxConfigWithPast.
    # NOTE(review): machine-obfuscated and currently broken — the base class
    # `a` is undefined (apparently OnnxConfigWithPast), this class shadows the
    # config class above (same obfuscated name), __init__ declares four
    # parameters under one name (a SyntaxError), all five methods share the
    # name `lowerCamelCase` (originally the `inputs`, `num_layers`,
    # `num_attention_heads`, `generate_dummy_inputs`, `default_onnx_opset`
    # members), and bodies reference undefined placeholders. Restore from
    # upstream before use.
    def __init__( self : Tuple , _snake_case : PretrainedConfig , _snake_case : str = "default" , _snake_case : List[PatchingSpec] = None , _snake_case : bool = False , ):
        # Forward config/task/patching/use_past to the base, then default a
        # missing pad_token_id to 0 for dummy-input generation.
        super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case)
        if not getattr(self._config , '''pad_token_id''' , _snake_case):
            # TODO: how to do that better?
            UpperCAmelCase_ = 0
    @property
    def lowerCamelCase ( self : Optional[Any]):
        # Declare ONNX input axes; with use_past the attention mask covers
        # past + current sequence.
        UpperCAmelCase_ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
        if self.use_past:
            self.fill_with_past_key_values_(_snake_case , direction='''inputs''')
            UpperCAmelCase_ = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def lowerCamelCase ( self : List[str]):
        # Number of transformer layers, read from the wrapped model config.
        return self._config.n_layer
    @property
    def lowerCamelCase ( self : int):
        # Number of attention heads, read from the wrapped model config.
        return self._config.n_head
    def lowerCamelCase ( self : Optional[int] , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ):
        # Build dummy model inputs for export, adding zeroed past_key_values
        # tensors and an extended attention mask when use_past is set.
        UpperCAmelCase_ = super(_snake_case , self).generate_dummy_inputs(
            _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case)
        # We need to order the input in the way they appears in the forward()
        UpperCAmelCase_ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
                UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                UpperCAmelCase_ = seqlen + 2
                UpperCAmelCase_ = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                UpperCAmelCase_ = [
                    (torch.zeros(_snake_case), torch.zeros(_snake_case)) for _ in range(self.num_layers)
                ]
        UpperCAmelCase_ = common_inputs['''attention_mask''']
        if self.use_past:
            UpperCAmelCase_ = ordered_inputs['''attention_mask'''].dtype
            UpperCAmelCase_ = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case)] , dim=1)
        return ordered_inputs
    @property
    def lowerCamelCase ( self : Union[str, Any]):
        # Minimum ONNX opset supporting this export.
        return 13
| 51
| 0
|
'''simple docstring'''
from PIL import Image
def lowerCAmelCase (img , level):
    """Return a copy of *img* with its brightness shifted by *level*.

    Args:
        img: a PIL ``Image`` (anything exposing a ``point(fn)`` method).
        level: brightness delta, in [-255.0 (black), 255.0 (white)].

    Returns:
        The image produced by applying the per-channel shift via ``img.point``.

    Raises:
        ValueError: if ``level`` is outside the valid range.
    """
    # Original defects fixed here: the parameter list was `(__A , __A)` — a
    # duplicate-name SyntaxError — and the nested function was never passed
    # to `img.point` (the undefined `__A` was passed instead).

    def brightness(c) -> float:
        # 128 + level + (c - 128) == c + level: shift every channel by `level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''')
    return img.point(brightness)
if __name__ == "__main__":
    # Load image. Fixed defects: the original called a nonexistent
    # `change_brightness` (the function above is `lowerCAmelCase`) and then
    # saved a nonexistent `brigt_img` instead of the variable it had bound.
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        lowercase_ = lowerCAmelCase(img, 100)
        lowercase_.save("image_data/lena_brightness.png", format="png")
| 211
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( a , unittest.TestCase ):
    # Unit tests for PhobertTokenizer using a tiny hand-written BPE vocabulary.
    # NOTE(review): obfuscation collapsed local names to `UpperCAmelCase_` /
    # `_snake_case`, so bodies reference names (e.g. `vocab_tokens`, `merges`)
    # that are no longer bound as written.
    UpperCAmelCase__ : Any = PhobertTokenizer      # tokenizer class under test
    UpperCAmelCase__ : List[str] = False           # no fast (Rust) tokenizer to test

    def lowerCamelCase ( self : str):
        """Write a minimal vocab + merges file into the test tmp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
        UpperCAmelCase_ = ['''#version: 0.2''', '''l à</w>''']
        UpperCAmelCase_ = {'''unk_token''': '''<unk>'''}
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            for token in vocab_tokens:
                fp.write(F"""{token} {vocab_tokens[token]}\n""")
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(_snake_case))

    def lowerCamelCase ( self : int , **_snake_case : Any):
        """Instantiate a tokenizer from the files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **_snake_case)

    def lowerCamelCase ( self : Optional[Any] , _snake_case : Union[str, Any]):
        """Return an (input, expected output) pair for the shared tokenizer tests."""
        UpperCAmelCase_ = '''Tôi là VinAI Research'''
        UpperCAmelCase_ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
        return input_text, output_text

    def lowerCamelCase ( self : List[str]):
        """Check full tokenization and token-to-id conversion on a sample sentence."""
        UpperCAmelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        UpperCAmelCase_ = '''Tôi là VinAI Research'''
        UpperCAmelCase_ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
        print(_snake_case)
        self.assertListEqual(_snake_case , _snake_case)
        UpperCAmelCase_ = tokens + [tokenizer.unk_token]
        # Ids for the tokens above; 3 is the <unk> id for out-of-vocab pieces.
        UpperCAmelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , _snake_case)
| 51
| 0
|
import copy
import random
from transformers import CLIPTokenizer
class a__ ( A__ ):
    # CLIPTokenizer subclass that maps one "placeholder token" to several
    # sub-tokens (for multi-vector textual-inversion embeddings) and rewrites
    # input text accordingly before tokenizing/encoding.
    # NOTE(review): obfuscation renamed every parameter to `_A` — duplicate
    # parameter names are a SyntaxError in Python — and bodies reference the
    # original names (`placeholder_token`, `num_vec_per_token`, `text`, ...),
    # so this class cannot run as written; restore real parameter names.

    def __init__( self : List[Any],*_A : List[str],**_A : List[str] ):
        """Initialize the wrapped CLIPTokenizer and an empty placeholder->tokens map."""
        super().__init__(*_snake_case,**_snake_case )
        # token_map: placeholder token string -> list of concrete sub-tokens.
        SCREAMING_SNAKE_CASE_ : Optional[int] = {}

    def __UpperCamelCase ( self : Any,_A : List[Any],*_A : List[Any],**_A : Optional[int] ):
        """Add a token to the vocabulary, raising if it already exists."""
        SCREAMING_SNAKE_CASE_ : List[str] = super().add_tokens(_snake_case,*_snake_case,**_snake_case )
        if num_added_tokens == 0:
            raise ValueError(
                F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
                " `placeholder_token` that is not already in the tokenizer." )

    def __UpperCamelCase ( self : Any,_A : Dict,*_A : int,_A : Optional[int]=1,**_A : Optional[Any] ):
        """Register a placeholder token backed by `num_vec_per_token` sub-tokens."""
        SCREAMING_SNAKE_CASE_ : str = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(_snake_case,*_snake_case,**_snake_case )
            output.append(_snake_case )
        else:
            # Multi-vector case: add "<tok>_0", "<tok>_1", ... as separate tokens.
            SCREAMING_SNAKE_CASE_ : Optional[int] = []
            for i in range(_snake_case ):
                SCREAMING_SNAKE_CASE_ : Any = placeholder_token + F'_{i}'
                self.try_adding_tokens(_snake_case,*_snake_case,**_snake_case )
                output.append(_snake_case )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    F'The tokenizer already has placeholder token {token} that can get confused with'
                    F' {placeholder_token}keep placeholder tokens independent' )
        SCREAMING_SNAKE_CASE_ : Any = output

    def __UpperCamelCase ( self : int,_A : str,_A : int=False,_A : Optional[int]=1.0 ):
        """Expand every placeholder token in `text` into its sub-token sequence."""
        if isinstance(_snake_case,_snake_case ):
            # List input: recurse over each string.
            SCREAMING_SNAKE_CASE_ : List[Any] = []
            for i in range(len(_snake_case ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i],vector_shuffle=_snake_case ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                SCREAMING_SNAKE_CASE_ : str = self.token_map[placeholder_token]
                # Keep only a prefix of the sub-tokens, scaled by prop_tokens_to_load.
                SCREAMING_SNAKE_CASE_ : Optional[Any] = tokens[: 1 + int(len(_snake_case ) * prop_tokens_to_load )]
                if vector_shuffle:
                    # Shuffle a copy so token_map itself is not mutated.
                    SCREAMING_SNAKE_CASE_ : List[Any] = copy.copy(_snake_case )
                    random.shuffle(_snake_case )
                SCREAMING_SNAKE_CASE_ : Dict = text.replace(_snake_case," ".join(_snake_case ) )
        return text

    def __call__( self : Tuple,_A : List[Any],*_A : Union[str, Any],_A : Optional[Any]=False,_A : Tuple=1.0,**_A : List[str] ):
        """Tokenize after expanding placeholder tokens in the input text."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                _snake_case,vector_shuffle=_snake_case,prop_tokens_to_load=_snake_case ),*_snake_case,**_snake_case,)

    def __UpperCamelCase ( self : Any,_A : str,*_A : Optional[Any],_A : Tuple=False,_A : Optional[Any]=1.0,**_A : Tuple ):
        """Encode after expanding placeholder tokens in the input text."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                _snake_case,vector_shuffle=_snake_case,prop_tokens_to_load=_snake_case ),*_snake_case,**_snake_case,)
| 18
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Optional[int] = TypeVar("DatasetType", Dataset, IterableDataset)
def A (datasets: List[DatasetType] , probabilities: Optional[List[float]] = None , seed: Optional[int] = None , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """Interleave several datasets of the same kind into a single dataset.

    Original defects fixed: every parameter was named ``__A`` (duplicate
    parameter names are a SyntaxError), and the body referenced the unbound
    names ``datasets``/``stopping_strategy``/``dataset_type``/``other_type``.
    Names are restored from the body's own usage.

    Args:
        datasets: non-empty list of ``Dataset`` or ``IterableDataset`` objects,
            all of the same kind.
        probabilities: optional per-dataset sampling probabilities, forwarded
            to the underlying interleave helper.
        seed: optional seed forwarded to the underlying interleave helper.
        info: optional ``DatasetInfo`` for the result.
        split: optional ``NamedSplit`` for the result.
        stopping_strategy: ``"first_exhausted"`` or ``"all_exhausted"``.

    Returns:
        A single interleaved dataset of the same kind as the inputs.

    Raises:
        ValueError: on an empty list, mixed or unsupported dataset types, a
            ``DatasetDict`` passed by mistake, or an unknown stopping strategy.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''' )
    for i, dataset in enumerate(datasets ):
        # Reject anything that is not a plain Dataset/IterableDataset, with a
        # targeted message for the common mistake of passing a (dict of) splits.
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            # Lock in the dataset kind from the first element; all others must match.
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def A (dsets: List[DatasetType] , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , axis: int = 0 , ) -> DatasetType:
    """Concatenate several datasets of the same kind into a single dataset.

    Original defects fixed: every parameter was named ``__A`` (duplicate
    parameter names are a SyntaxError) while the body used ``dsets``; the
    validation error messages were copy-pasted from the interleave helper and
    said "interleave" — they now say "concatenate".

    Args:
        dsets: non-empty list of ``Dataset`` or ``IterableDataset`` objects,
            all of the same kind.
        info: optional ``DatasetInfo`` for the result.
        split: optional ``NamedSplit`` for the result.
        axis: 0 to concatenate rows, 1 to concatenate columns.

    Returns:
        A single concatenated dataset of the same kind as the inputs.

    Raises:
        ValueError: on an empty list, mixed or unsupported dataset types, or a
            ``DatasetDict`` passed by mistake.
    """
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''' )
    for i, dataset in enumerate(dsets ):
        # Reject anything that is not a plain Dataset/IterableDataset, with a
        # targeted message for the common mistake of passing a (dict of) splits.
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        '''is an empty dataset dictionary.''' )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            # Lock in the dataset kind from the first element; all others must match.
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"""Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
| 51
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
    # Tests for ProphetNetTokenizer and the BERT-style helpers it builds on
    # (BasicTokenizer, WordpieceTokenizer, char-class predicates).
    # NOTE(review): obfuscation collapsed local names to `lowerCAmelCase_` /
    # `_snake_case`, so several bodies reference names (e.g. `vocab_tokens`)
    # that are no longer bound as written.
    UpperCAmelCase_ :int = ProphetNetTokenizer    # tokenizer class under test
    UpperCAmelCase_ :List[str] = False            # no fast (Rust) tokenizer to test

    def __lowerCAmelCase ( self ) -> List[Any]:
        """Write a tiny wordpiece vocabulary into the test tmp dir."""
        super().setUp()
        lowerCAmelCase_ :Any = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def __lowerCAmelCase ( self , __A ) -> Dict:
        """Return an (input, expected output) pair for the shared tokenizer tests."""
        lowerCAmelCase_ :Optional[Any] = """UNwant\u00E9d,running"""
        lowerCAmelCase_ :Any = """unwanted, running"""
        return input_text, output_text

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Full tokenization + token-to-id conversion on the tiny vocab."""
        lowerCAmelCase_ :List[Any] = self.tokenizer_class(self.vocab_file )
        lowerCAmelCase_ :Any = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(_snake_case , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , [9, 6, 7, 12, 10, 11] )

    def __lowerCAmelCase ( self ) -> int:
        """BasicTokenizer splits CJK characters into individual tokens."""
        lowerCAmelCase_ :Optional[Any] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )

    def __lowerCAmelCase ( self ) -> Tuple:
        """Lower-casing path of BasicTokenizer."""
        lowerCAmelCase_ :Tuple = BasicTokenizer(do_lower_case=_snake_case )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __lowerCAmelCase ( self ) -> Any:
        """Lower-casing with accents kept (strip_accents disabled)."""
        lowerCAmelCase_ :Optional[int] = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )

    def __lowerCAmelCase ( self ) -> Any:
        """Lower-casing with accents stripped."""
        lowerCAmelCase_ :Union[str, Any] = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """Default accent handling when lower-casing."""
        lowerCAmelCase_ :Tuple = BasicTokenizer(do_lower_case=_snake_case )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __lowerCAmelCase ( self ) -> int:
        """Case-preserving path of BasicTokenizer."""
        lowerCAmelCase_ :str = BasicTokenizer(do_lower_case=_snake_case )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """Case-preserving with accents kept."""
        lowerCAmelCase_ :str = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """Case-preserving with accents stripped."""
        lowerCAmelCase_ :Union[str, Any] = BasicTokenizer(do_lower_case=_snake_case , strip_accents=_snake_case )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __lowerCAmelCase ( self ) -> str:
        """never_split tokens survive tokenization intact."""
        lowerCAmelCase_ :List[str] = BasicTokenizer(do_lower_case=_snake_case , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )

    def __lowerCAmelCase ( self ) -> Tuple:
        """WordpieceTokenizer: greedy longest-match-first with [UNK] fallback."""
        lowerCAmelCase_ :Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        lowerCAmelCase_ :Any = {}
        for i, token in enumerate(_snake_case ):
            lowerCAmelCase_ :str = i
        lowerCAmelCase_ :int = WordpieceTokenizer(vocab=_snake_case , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )

    @require_torch
    def __lowerCAmelCase ( self ) -> List[str]:
        """Batch-encode with the pretrained checkpoint and check tensor shapes/ids."""
        lowerCAmelCase_ :Optional[int] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        lowerCAmelCase_ :int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        lowerCAmelCase_ :List[str] = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
        lowerCAmelCase_ :Union[str, Any] = tokenizer(_snake_case , padding=_snake_case , return_tensors="""pt""" )
        self.assertIsInstance(_snake_case , _snake_case )
        lowerCAmelCase_ :Union[str, Any] = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(_snake_case , _snake_case )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """_is_whitespace classifies whitespace chars (incl. NBSP) correctly."""
        self.assertTrue(_is_whitespace(""" """ ) )
        self.assertTrue(_is_whitespace("""\t""" ) )
        self.assertTrue(_is_whitespace("""\r""" ) )
        self.assertTrue(_is_whitespace("""\n""" ) )
        self.assertTrue(_is_whitespace("""\u00A0""" ) )
        self.assertFalse(_is_whitespace("""A""" ) )
        self.assertFalse(_is_whitespace("""-""" ) )

    def __lowerCAmelCase ( self ) -> Any:
        """_is_control classifies control chars correctly."""
        self.assertTrue(_is_control("""\u0005""" ) )
        self.assertFalse(_is_control("""A""" ) )
        self.assertFalse(_is_control(""" """ ) )
        self.assertFalse(_is_control("""\t""" ) )
        self.assertFalse(_is_control("""\r""" ) )

    def __lowerCAmelCase ( self ) -> Tuple:
        """_is_punctuation classifies punctuation chars correctly."""
        self.assertTrue(_is_punctuation("""-""" ) )
        self.assertTrue(_is_punctuation("""$""" ) )
        self.assertTrue(_is_punctuation("""`""" ) )
        self.assertTrue(_is_punctuation(""".""" ) )
        self.assertFalse(_is_punctuation("""A""" ) )
        self.assertFalse(_is_punctuation(""" """ ) )

    @slow
    def __lowerCAmelCase ( self ) -> str:
        """build_inputs_with_special_tokens appends [SEP] (id 102) after each segment."""
        lowerCAmelCase_ :List[Any] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
        lowerCAmelCase_ :int = tokenizer.encode("""sequence builders""" , add_special_tokens=_snake_case )
        lowerCAmelCase_ :int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_snake_case )
        lowerCAmelCase_ :int = tokenizer.build_inputs_with_special_tokens(_snake_case )
        lowerCAmelCase_ :Any = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 84
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
snake_case_ : Optional[Any] = "pt"
elif is_tf_available():
snake_case_ : Union[str, Any] = "tf"
else:
snake_case_ : str = "jax"
class __snake_case ( a , unittest.TestCase ):
    # Tests for ByT5Tokenizer, which tokenizes raw UTF-8 bytes (ids are
    # byte values shifted by the special-token offset; 1 is </s>, 0 is <pad>).
    # NOTE(review): obfuscation collapsed locals to `UpperCAmelCase_` /
    # `_snake_case`, so many bodies reference names that are no longer bound
    # as written (e.g. `toks`, `tokenizer`, `batch`).
    UpperCAmelCase__ : List[Any] = ByTaTokenizer   # tokenizer class under test
    UpperCAmelCase__ : int = False                 # no fast (Rust) tokenizer to test

    def lowerCamelCase ( self : Optional[int]):
        """Save a default ByT5 tokenizer into the test tmp dir."""
        super().setUp()
        UpperCAmelCase_ = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def lowerCamelCase ( self : Tuple):
        """Reference tokenizer loaded from the google/byt5-small checkpoint."""
        return ByTaTokenizer.from_pretrained('''google/byt5-small''')

    def lowerCamelCase ( self : List[str] , **_snake_case : Union[str, Any]):
        """Instantiate a tokenizer from the files written in setUp."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case)

    def lowerCamelCase ( self : Dict , _snake_case : int , _snake_case : Tuple=False , _snake_case : Dict=20 , _snake_case : Optional[Any]=5):
        """Build a (text, ids) pair of decodable single-token ids for the common tests."""
        UpperCAmelCase_ = []
        for i in range(len(_snake_case)):
            try:
                UpperCAmelCase_ = tokenizer.decode([i] , clean_up_tokenization_spaces=_snake_case)
            except UnicodeDecodeError:
                # Skip ids that do not decode to valid UTF-8 on their own.
                pass
            toks.append((i, tok))
        # Keep only ASCII-ish tokens that round-trip through encode().
        UpperCAmelCase_ = list(filter(lambda _snake_case: re.match(r'''^[ a-zA-Z]+$''' , t[1]) , _snake_case))
        UpperCAmelCase_ = list(filter(lambda _snake_case: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_snake_case) , _snake_case))
        if max_length is not None and len(_snake_case) > max_length:
            UpperCAmelCase_ = toks[:max_length]
        if min_length is not None and len(_snake_case) < min_length and len(_snake_case) > 0:
            while len(_snake_case) < min_length:
                UpperCAmelCase_ = toks + toks
        # toks_str = [t[1] for t in toks]
        UpperCAmelCase_ = [t[0] for t in toks]
        # Ensure consistency
        UpperCAmelCase_ = tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case)
        if " " not in output_txt and len(_snake_case) > 1:
            # Force at least one space so decode(encode(text)) is stable.
            UpperCAmelCase_ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_snake_case)
                + ''' '''
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_snake_case)
            )
        if with_prefix_space:
            UpperCAmelCase_ = ''' ''' + output_txt
        UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
        return output_txt, output_ids

    def lowerCamelCase ( self : Union[str, Any]):
        """An explicit trailing </s> must encode the same as no trailing </s>."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''])
        UpperCAmelCase_ = tokenizer(['''hi''', '''I went to the gym''', ''''''])
        self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''])

    def lowerCamelCase ( self : str):
        """Multi-byte characters round-trip through encode/decode."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = '''Unicode €.'''
        UpperCAmelCase_ = tokenizer(_snake_case)
        UpperCAmelCase_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['''input_ids'''] , _snake_case)
        # decoding
        UpperCAmelCase_ = tokenizer.decode(_snake_case)
        self.assertEqual(_snake_case , '''Unicode €.</s>''')
        UpperCAmelCase_ = tokenizer('''e è é ê ë''')
        UpperCAmelCase_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['''input_ids'''] , _snake_case)
        # decoding
        UpperCAmelCase_ = tokenizer.decode(_snake_case)
        self.assertEqual(_snake_case , '''e è é ê ë</s>''')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''')) , '''e è é ê ë</s>''')

    def lowerCamelCase ( self : Any):
        """Padded batch encoding returns the expected ids/shapes per framework."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        # fmt: off
        UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
        self.assertIsInstance(_snake_case , _snake_case)
        if FRAMEWORK != "jax":
            UpperCAmelCase_ = list(batch.input_ids.numpy()[0])
        else:
            UpperCAmelCase_ = list(batch.input_ids.tolist()[0])
        self.assertListEqual(_snake_case , _snake_case)
        self.assertEqual((2, 37) , batch.input_ids.shape)
        self.assertEqual((2, 37) , batch.attention_mask.shape)

    def lowerCamelCase ( self : Optional[Any]):
        """Without text_target, no decoder inputs are produced."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('''input_ids''' , _snake_case)
        self.assertIn('''attention_mask''' , _snake_case)
        self.assertNotIn('''decoder_input_ids''' , _snake_case)
        self.assertNotIn('''decoder_attention_mask''' , _snake_case)

    def lowerCamelCase ( self : Tuple):
        """text_target honors max_length padding/truncation."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        UpperCAmelCase_ = tokenizer(
            text_target=_snake_case , max_length=32 , padding='''max_length''' , truncation=_snake_case , return_tensors=_snake_case)
        self.assertEqual(32 , targets['''input_ids'''].shape[1])

    def lowerCamelCase ( self : int):
        """An explicit </s> in both source and target maps to eos id 1."""
        UpperCAmelCase_ = self.ta_base_tokenizer
        UpperCAmelCase_ = ['''A long paragraph for summarization. </s>''']
        UpperCAmelCase_ = ['''Summary of the text. </s>''']
        # fmt: off
        UpperCAmelCase_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        UpperCAmelCase_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        UpperCAmelCase_ = tokenizer(_snake_case , text_target=_snake_case)
        self.assertEqual(_snake_case , batch['''input_ids'''][0])
        self.assertEqual(_snake_case , batch['''labels'''][0])

    def lowerCamelCase ( self : Tuple):
        """save_pretrained / from_pretrained round-trips tokens and added special tokens."""
        UpperCAmelCase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length , 42)
        # Now let's start the test
        UpperCAmelCase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase_ = tempfile.mkdtemp()
                UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
                UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                tokenizer.save_pretrained(_snake_case)
                UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
                UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                self.assertListEqual(_snake_case , _snake_case)
                shutil.rmtree(_snake_case)
        UpperCAmelCase_ = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                UpperCAmelCase_ = tempfile.mkdtemp()
                UpperCAmelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
                tokenizer.add_tokens(['''bim''', '''bambam'''])
                UpperCAmelCase_ = tokenizer.additional_special_tokens
                additional_special_tokens.append('''new_additional_special_token''')
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
                UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                tokenizer.save_pretrained(_snake_case)
                UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case)
                UpperCAmelCase_ = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
                self.assertListEqual(_snake_case , _snake_case)
                self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length , 42)
                UpperCAmelCase_ = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=43)
                self.assertEqual(tokenizer.model_max_length , 43)
                shutil.rmtree(_snake_case)

    def lowerCamelCase ( self : List[Any]):
        """additional_special_tokens in saved config files are honored and overridable."""
        UpperCAmelCase_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_snake_case)
                with open(os.path.join(_snake_case , '''special_tokens_map.json''') , encoding='''utf-8''') as json_file:
                    UpperCAmelCase_ = json.load(_snake_case)
                with open(os.path.join(_snake_case , '''tokenizer_config.json''') , encoding='''utf-8''') as json_file:
                    UpperCAmelCase_ = json.load(_snake_case)
                # ByT5 reserves 125 <extra_id_N> sentinel tokens.
                UpperCAmelCase_ = [F"""<extra_id_{i}>""" for i in range(125)]
                UpperCAmelCase_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                UpperCAmelCase_ = added_tokens_extra_ids + [
                    '''an_additional_special_token'''
                ]
                with open(os.path.join(_snake_case , '''special_tokens_map.json''') , '''w''' , encoding='''utf-8''') as outfile:
                    json.dump(_snake_case , _snake_case)
                with open(os.path.join(_snake_case , '''tokenizer_config.json''') , '''w''' , encoding='''utf-8''') as outfile:
                    json.dump(_snake_case , _snake_case)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                UpperCAmelCase_ = tokenizer_class.from_pretrained(
                    _snake_case , )
                self.assertIn(
                    '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''])) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                UpperCAmelCase_ = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_snake_case)]
                UpperCAmelCase_ = tokenizer_class.from_pretrained(
                    _snake_case , additional_special_tokens=_snake_case , )
                self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''])) , )

    def lowerCamelCase ( self : Any):
        """Decoding id 255 alone yields the empty string."""
        UpperCAmelCase_ = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_snake_case)
                UpperCAmelCase_ = tokenizer_class.from_pretrained(_snake_case)
                self.assertTrue(tokenizer.decode([255]) == '''''')

    def lowerCamelCase ( self : int):
        """Not applicable to a byte-level tokenizer (intentionally skipped)."""
        pass

    def lowerCamelCase ( self : Optional[int]):
        """Not applicable to a byte-level tokenizer (intentionally skipped)."""
        pass

    def lowerCamelCase ( self : Dict):
        """Not applicable to a byte-level tokenizer (intentionally skipped)."""
        pass

    def lowerCamelCase ( self : List[Any]):
        """Not applicable to a byte-level tokenizer (intentionally skipped)."""
        pass

    def lowerCamelCase ( self : Tuple):
        """convert_tokens_to_string returns a plain str."""
        UpperCAmelCase_ = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                UpperCAmelCase_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
                UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
                self.assertIsInstance(_snake_case , _snake_case)

    def lowerCamelCase ( self : Union[str, Any]):
        """Setting special-token *_id attributes keeps the token/id views in sync."""
        UpperCAmelCase_ = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                UpperCAmelCase_ = [
                    '''bos_token''',
                    '''eos_token''',
                    '''unk_token''',
                    '''sep_token''',
                    '''pad_token''',
                    '''cls_token''',
                    '''mask_token''',
                ]
                UpperCAmelCase_ = 0
                UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(
                    _snake_case , skip_special_tokens=_snake_case)
                for attr in attributes_list:
                    setattr(_snake_case , attr + '''_id''' , _snake_case)
                    self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
                    self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
                    setattr(_snake_case , attr + '''_id''' , _snake_case)
                    self.assertEqual(getattr(_snake_case , _snake_case) , _snake_case)
                    self.assertEqual(getattr(_snake_case , attr + '''_id''') , _snake_case)
                setattr(_snake_case , '''additional_special_tokens_ids''' , [])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [])
                setattr(_snake_case , '''additional_special_tokens_ids''' , [token_id_to_test_setters])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens''') , [token_to_test_setters])
                self.assertListEqual(getattr(_snake_case , '''additional_special_tokens_ids''') , [token_id_to_test_setters])
| 51
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCamelCase__: int = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
    """Staging-hub integration tests: push a small Flax BERT model to the hub
    (user and org namespaces, via ``push_to_hub`` and via ``save_pretrained``)
    and verify the re-downloaded parameters match the originals.

    NOTE(review): all four methods share the name ``A`` (an automated-renaming
    artifact), so later definitions shadow earlier ones; some references below
    (``_snake_case``, ``new_model``) are unbound. Confirm against upstream.
    """
    @classmethod
    def A ( cls : Union[str, Any] ) -> int:
        # Authenticate against the staging hub with the shared test token.
        UpperCAmelCase : Optional[Any] = TOKEN
        HfFolder.save_token(_snake_case )
    @classmethod
    def A ( cls : Union[str, Any] ) -> Dict:
        # Best-effort teardown: remove repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
        except HTTPError:
            pass
    def A ( self : List[str] ) -> Optional[Any]:
        # Push a tiny Flax BERT to the user namespace and check round-trip.
        UpperCAmelCase : List[Any] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        UpperCAmelCase : List[str] = FlaxBertModel(_snake_case )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
        UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
        UpperCAmelCase : Optional[int] = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase : int = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            # Parameter tensors must survive upload/download essentially intact.
            UpperCAmelCase : Dict = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token )
        UpperCAmelCase : int = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
        UpperCAmelCase : Tuple = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase : Dict = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            UpperCAmelCase : List[str] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
    def A ( self : Union[str, Any] ) -> Dict:
        # Same round-trip check, but pushing into an organization namespace.
        UpperCAmelCase : List[str] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        UpperCAmelCase : int = FlaxBertModel(_snake_case )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
        UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
        UpperCAmelCase : List[Any] = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase : int = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            UpperCAmelCase : int = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                _snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
        UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
        UpperCAmelCase : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            UpperCAmelCase : str = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def snake_case_ ( model_a : Dict , model_b : Dict ) -> bool:
    """Return True when two Flax models have numerically identical parameters.

    Fixes two defects in the previous version: both parameters were declared
    with the same name (a SyntaxError), and the loop compared a model's
    parameters against themselves, so the function always returned True.

    Args:
        model_a: First Flax model; must expose a ``.params`` pytree.
        model_b: Second Flax model to compare against.

    Returns:
        True if every parameter tensor differs by at most 1e-4 in summed
        absolute difference, False otherwise.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        # Total absolute deviation per tensor; tolerate tiny numeric noise.
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
    """Tests for loading Flax BERT checkpoints from a subfolder, both for a
    single-file and a sharded (``max_shard_size``) save, locally and from
    hub repos that store weights under a subfolder.

    NOTE(review): all methods share the name ``A`` (automated renaming), so
    later definitions shadow earlier ones; confirm against upstream.
    """
    def A ( self : Optional[int] ) -> List[str]:
        # Save into tmp_dir/bert; plain from_pretrained must fail, while
        # passing subfolder= must recover an identical model.
        UpperCAmelCase : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        UpperCAmelCase : Optional[int] = FlaxBertModel(_snake_case )
        UpperCAmelCase : str = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_snake_case , _snake_case ) )
            with self.assertRaises(_snake_case ):
                UpperCAmelCase : List[Any] = FlaxBertModel.from_pretrained(_snake_case )
            UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
        self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
    def A ( self : int ) -> str:
        # Same as above, but force sharded weights via a tiny max_shard_size.
        UpperCAmelCase : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        UpperCAmelCase : int = FlaxBertModel(_snake_case )
        UpperCAmelCase : Optional[Any] = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' )
            with self.assertRaises(_snake_case ):
                UpperCAmelCase : Optional[Any] = FlaxBertModel.from_pretrained(_snake_case )
            UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
        self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
    def A ( self : List[Any] ) -> List[Any]:
        # Hub repo storing weights under a subfolder: needs subfolder= to load.
        UpperCAmelCase : Union[str, Any] = '''bert'''
        UpperCAmelCase : int = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(_snake_case ):
            UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(_snake_case )
        UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
        self.assertIsNotNone(_snake_case )
    def A ( self : Optional[int] ) -> List[Any]:
        # Same, for a sharded checkpoint stored under a subfolder.
        UpperCAmelCase : Union[str, Any] = '''bert'''
        UpperCAmelCase : List[str] = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(_snake_case ):
            UpperCAmelCase : Dict = FlaxBertModel.from_pretrained(_snake_case )
        UpperCAmelCase : Union[str, Any] = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
        self.assertIsNotNone(_snake_case )
| 23
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the MBart model family: each key is a submodule
# name, each value the list of public symbols it exports.  Optional backends
# (sentencepiece / tokenizers / torch / tf / flax) are only registered when
# importable.  Fixes the previous version, which assigned every symbol list to
# a throwaway variable (so nothing was registered), referenced an undefined
# `_import_structure` at the bottom, and never installed the `_LazyModule`
# into `sys.modules`.
_import_structure = {
    "configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is
    # replaced by the lazy proxy below.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowerCAmelCase ( __UpperCamelCase ):
    """Agent tool that answers a natural-language question about a document
    image using the Donut vision encoder-decoder model fine-tuned on DocVQA.

    Fixes in this version: the class attributes and the three pipeline methods
    previously all shared a single (renamed) identifier, so later definitions
    shadowed earlier ones and the base ``PipelineTool`` machinery could not
    find them; ``encode`` referenced unbound locals; and ``decode`` called the
    non-existent processor method ``tokenajson`` instead of ``token2json``.
    """

    default_checkpoint = '''naver-clova-ix/donut-base-finetuned-docvqa'''
    description = (
        '''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
        '''should be the document containing the information, as well as a `question` that is the question about the '''
        '''document. It returns a text that contains the answer to the question.'''
    )
    name = '''document_qa'''
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__( self : List[Any] , *args : Union[str, Any] , **kwargs : List[str] ) -> Union[str, Any]:
        # Donut preprocessing needs PIL; fail fast with a clear message.
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args , **kwargs )

    def encode( self : Optional[Any] , document : "Image" , question : str ) -> List[str]:
        """Build the Donut DocVQA prompt and pixel inputs for one (document, question) pair."""
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward( self : Optional[int] , inputs : List[str] ) -> Tuple:
        """Run constrained generation with the Donut decoder and return the token sequences."""
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode( self : Optional[Any] , outputs : Any ) -> Optional[int]:
        """Strip special tokens and the task token, then parse the answer out of Donut's output."""
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 50
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( a ):
    r"""FLAVA processor: wraps a FLAVA image processor and a BERT tokenizer into
    a single processor so callers can feed text and/or images in one call.

    Fixes in this version: ``__init__`` and ``__call__`` previously declared
    many parameters under one shared (renamed) identifier — a SyntaxError —
    and the class attributes / utility methods all shadowed each other, so
    ``ProcessorMixin`` could not resolve them; locals in ``__call__`` were
    unbound.  The restored names follow the standard processor interface.
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''FlavaImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')

    def __init__( self : Union[str, Any] , image_processor : List[str]=None , tokenizer : str=None , **kwargs : int):
        # Accept the deprecated `feature_extractor` kwarg as a fallback.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor , tokenizer)
        self.current_processor = self.image_processor

    def __call__( self : List[Any] , images : Optional[ImageInput] = None , text : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = False , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_image_mask : Optional[bool] = None , return_codebook_pixels : Optional[bool] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Any , ):
        """Tokenize ``text`` and/or preprocess ``images``; when both are given
        the image features are merged into the text encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)

    def batch_decode( self : Any , *args : Optional[Any] , **kwargs : int):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs)

    def decode( self : Optional[int] , *args : int , **kwargs : Dict):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs)

    @property
    def model_input_names( self : str):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class( self : str):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self : Any):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 51
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowercase : Optional[Any] ="pt"
elif is_tf_available():
_lowercase : Union[str, Any] ="tf"
else:
_lowercase : str ="jax"
class snake_case__ (A__ , unittest.TestCase ):
    """Test suite for the byte-level ByT5 tokenizer: encoding/decoding of
    unicode text, batching/padding, target encoding, save/load round-trips,
    special-token configuration files, and setter behavior.

    NOTE(review): most methods share the name ``SCREAMING_SNAKE_CASE__`` and
    most locals are renamed to ``a__`` (automated-renaming artifacts), so
    later definitions shadow earlier ones and several references below
    (``tokenizer``, ``toks``, ``output_txt`` etc.) are unbound — compare
    against the upstream ByT5 tokenizer tests before relying on this code.
    """
    __lowerCAmelCase :List[Any] = ByTaTokenizer
    __lowerCAmelCase :int = False
    def SCREAMING_SNAKE_CASE__( self ) -> str:
        """Save a fresh ByT5 tokenizer into the test's temporary directory."""
        super().setUp()
        a__ : Optional[int] = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def SCREAMING_SNAKE_CASE__( self ) -> str:
        """Return the pretrained google/byt5-small tokenizer (cached)."""
        return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
    def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Any:
        """Reload the tokenizer saved in setUp, forwarding extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
    def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> int:
        """Build a decodable (text, ids) pair from the tokenizer's vocabulary,
        honoring optional min/max length and leading-space constraints."""
        a__ : Dict = []
        for i in range(len(_snake_case ) ):
            try:
                a__ : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_snake_case )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        a__ : int = list(filter(lambda __lowercase : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , _snake_case ) )
        a__ : Dict = list(filter(lambda __lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_snake_case ) , _snake_case ) )
        if max_length is not None and len(_snake_case ) > max_length:
            a__ : Optional[int] = toks[:max_length]
        if min_length is not None and len(_snake_case ) < min_length and len(_snake_case ) > 0:
            while len(_snake_case ) < min_length:
                a__ : Optional[int] = toks + toks
        # toks_str = [t[1] for t in toks]
        a__ : Any = [t[0] for t in toks]
        # Ensure consistency
        a__ : List[Any] = tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case )
        if " " not in output_txt and len(_snake_case ) > 1:
            a__ : str = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_snake_case )
                + """ """
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_snake_case )
            )
        if with_prefix_space:
            a__ : Any = """ """ + output_txt
        a__ : List[Any] = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
        return output_txt, output_ids
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Explicit </s> suffixes must encode the same as auto-added EOS."""
        a__ : Dict = self.ta_base_tokenizer
        a__ : str = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
        a__ : int = tokenizer(["""hi""", """I went to the gym""", """"""] )
        self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
    def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
        """Round-trip multibyte unicode text through encode/decode."""
        a__ : Optional[Any] = self.ta_base_tokenizer
        a__ : str = """Unicode €."""
        a__ : Tuple = tokenizer(_snake_case )
        a__ : str = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1]
        self.assertEqual(encoded["""input_ids"""] , _snake_case )
        # decoding
        a__ : Dict = tokenizer.decode(_snake_case )
        self.assertEqual(_snake_case , """Unicode €.</s>""" )
        a__ : List[str] = tokenizer("""e è é ê ë""" )
        a__ : Union[str, Any] = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1]
        self.assertEqual(encoded["""input_ids"""] , _snake_case )
        # decoding
        a__ : int = tokenizer.decode(_snake_case )
        self.assertEqual(_snake_case , """e è é ê ë</s>""" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
    def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
        """Batch encoding with padding must produce the expected ids and shapes."""
        a__ : Dict = self.ta_base_tokenizer
        a__ : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        # fmt: off
        a__ : List[str] = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
        # fmt: on
        a__ : str = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case )
        self.assertIsInstance(_snake_case , _snake_case )
        if FRAMEWORK != "jax":
            a__ : Optional[int] = list(batch.input_ids.numpy()[0] )
        else:
            a__ : Any = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(_snake_case , _snake_case )
        self.assertEqual((2, 3_7) , batch.input_ids.shape )
        self.assertEqual((2, 3_7) , batch.attention_mask.shape )
    def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
        """Encoding inputs must yield input_ids/attention_mask but no decoder fields."""
        a__ : List[str] = self.ta_base_tokenizer
        a__ : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        a__ : Tuple = tokenizer(_snake_case , padding=_snake_case , return_tensors=_snake_case )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("""input_ids""" , _snake_case )
        self.assertIn("""attention_mask""" , _snake_case )
        self.assertNotIn("""decoder_input_ids""" , _snake_case )
        self.assertNotIn("""decoder_attention_mask""" , _snake_case )
    def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
        """Target texts padded to max_length must come back with that length."""
        a__ : str = self.ta_base_tokenizer
        a__ : Tuple = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        a__ : List[Any] = tokenizer(
            text_target=_snake_case , max_length=3_2 , padding="""max_length""" , truncation=_snake_case , return_tensors=_snake_case )
        self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Joint (text, text_target) encoding must produce the expected ids/labels."""
        a__ : Tuple = self.ta_base_tokenizer
        a__ : Tuple = ["""A long paragraph for summarization. </s>"""]
        a__ : Optional[int] = ["""Summary of the text. </s>"""]
        # fmt: off
        a__ : Optional[int] = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
        a__ : Dict = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
        # fmt: on
        a__ : List[str] = tokenizer(_snake_case , text_target=_snake_case )
        self.assertEqual(_snake_case , batch["""input_ids"""][0] )
        self.assertEqual(_snake_case , batch["""labels"""][0] )
    def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
        """Save/reload round-trips must preserve encodings, added tokens, and
        model_max_length overrides."""
        a__ : Any = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 4_2 )
        # Now let's start the test
        a__ : str = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                a__ : Dict = tempfile.mkdtemp()
                a__ : str = """ He is very happy, UNwant\u00E9d,running"""
                a__ : Any = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
                tokenizer.save_pretrained(_snake_case )
                a__ : Tuple = tokenizer.__class__.from_pretrained(_snake_case )
                a__ : str = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
                self.assertListEqual(_snake_case , _snake_case )
                shutil.rmtree(_snake_case )
        a__ : Any = self.get_tokenizers(model_max_length=4_2 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                a__ : Optional[Any] = tempfile.mkdtemp()
                a__ : Tuple = """ He is very happy, UNwant\u00E9d,running"""
                tokenizer.add_tokens(["""bim""", """bambam"""] )
                a__ : Union[str, Any] = tokenizer.additional_special_tokens
                additional_special_tokens.append("""new_additional_special_token""" )
                tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
                a__ : Any = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
                tokenizer.save_pretrained(_snake_case )
                a__ : List[Any] = tokenizer.__class__.from_pretrained(_snake_case )
                a__ : str = after_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
                self.assertListEqual(_snake_case , _snake_case )
                self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 4_2 )
                a__ : Optional[Any] = tokenizer.__class__.from_pretrained(_snake_case , model_max_length=4_3 )
                self.assertEqual(tokenizer.model_max_length , 4_3 )
                shutil.rmtree(_snake_case )
    def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
        """Hand-edited special_tokens_map.json / tokenizer_config.json must be
        honored by from_pretrained, and be overridable via kwargs."""
        a__ : Optional[Any] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_snake_case )
                with open(os.path.join(_snake_case , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
                    a__ : List[str] = json.load(_snake_case )
                with open(os.path.join(_snake_case , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
                    a__ : Tuple = json.load(_snake_case )
                a__ : Dict = [F'''<extra_id_{i}>''' for i in range(1_2_5 )]
                a__ : List[Any] = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                a__ : int = added_tokens_extra_ids + [
                    """an_additional_special_token"""
                ]
                with open(os.path.join(_snake_case , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(_snake_case , _snake_case )
                with open(os.path.join(_snake_case , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
                    json.dump(_snake_case , _snake_case )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                a__ : Optional[int] = tokenizer_class.from_pretrained(
                    _snake_case , )
                self.assertIn(
                    """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                a__ : List[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=_snake_case )]
                a__ : Optional[Any] = tokenizer_class.from_pretrained(
                    _snake_case , additional_special_tokens=_snake_case , )
                self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Decoding byte id 255 (unused by UTF-8 text) must yield the empty string."""
        a__ : List[str] = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_snake_case )
                a__ : int = tokenizer_class.from_pretrained(_snake_case )
                self.assertTrue(tokenizer.decode([2_5_5] ) == """""" )
    def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def SCREAMING_SNAKE_CASE__( self ) -> Dict:
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
        """Intentionally skipped: not applicable to a byte-level tokenizer."""
        pass
    def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
        """convert_tokens_to_string must return a plain string for byte tokens."""
        a__ : Dict = self.get_tokenizers(fast=_snake_case , do_lower_case=_snake_case )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                a__ : List[str] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
                a__ : Any = tokenizer.convert_tokens_to_string(_snake_case )
                self.assertIsInstance(_snake_case , _snake_case )
    def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
        """Special-token setter round-trip test (same shape as the generic
        tokenizer test; see NOTE in the class docstring about unbound names)."""
        a__ : int = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                a__ : Optional[Any] = [
                    """bos_token""",
                    """eos_token""",
                    """unk_token""",
                    """sep_token""",
                    """pad_token""",
                    """cls_token""",
                    """mask_token""",
                ]
                a__ : Union[str, Any] = 0
                a__ : List[Any] = tokenizer.convert_ids_to_tokens(
                    _snake_case , skip_special_tokens=_snake_case )
                for attr in attributes_list:
                    setattr(_snake_case , attr + """_id""" , _snake_case )
                    self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
                    self.assertEqual(getattr(_snake_case , attr + """_id""" ) , _snake_case )
                    setattr(_snake_case , attr + """_id""" , _snake_case )
                    self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
                    self.assertEqual(getattr(_snake_case , attr + """_id""" ) , _snake_case )
                setattr(_snake_case , """additional_special_tokens_ids""" , [] )
                self.assertListEqual(getattr(_snake_case , """additional_special_tokens""" ) , [] )
                self.assertListEqual(getattr(_snake_case , """additional_special_tokens_ids""" ) , [] )
                setattr(_snake_case , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(_snake_case , """additional_special_tokens""" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(_snake_case , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 170
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __snake_case :
    """Empty placeholder class; exists only to exercise the
    ``require_onnxruntime`` decorator (skips when onnxruntime is missing)."""
    pass
| 51
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for MGP-STR: each key is a submodule name, each value
# the list of public symbols it exports; the torch-backed modeling module is
# only registered when torch is importable.  Fixes the previous version, which
# assigned the symbol list to a throwaway variable (never registered),
# referenced an undefined `_import_structure` at the bottom, and never
# installed the `_LazyModule` into `sys.modules`.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is
    # replaced by the lazy proxy below.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 328
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# The pickles reference classes by their python-2 module/attribute names, so we
# alias the tokenizer/corpus classes and register this module under those names.
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    """Convert a TensorFlow Transformer-XL checkpoint and/or pickled corpus to PyTorch files.

    Args:
        tf_checkpoint_path: optional TF checkpoint to convert (skipped when empty).
        transfo_xl_config_file: optional config JSON; a default config is used when empty.
        pytorch_dump_folder_path: output folder for the converted files.
        transfo_xl_dataset_file: optional pre-processed corpus pickle to convert (skipped when empty).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # Command-line entry point: collect the input/output paths and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 51
| 0
|
'''simple docstring'''
def longest_common_subsequence(x, y):
    """Return ``(length, subsequence)`` — the longest common subsequence of ``x`` and ``y``.

    Classic O(len(x) * len(y)) dynamic programming, followed by a backtracking
    pass that reconstructs one optimal subsequence.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # l[i][j] = LCS length of x[:i] and y[:j]
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Backtrack from the bottom-right corner to rebuild one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 162
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor ``x`` with values in [0, 1] to a bit tensor in {-1, 1}.

    Input shape ``(b, c, h, w)``; output shape ``(b, c * bits, h, w)``.
    """
    device = x.device

    # Quantise to integer pixel values in [0, 255].
    x = (x * 255).int().clamp(0, 255)

    # Bit masks 2**(bits-1) ... 2**0, broadcastable over (h, w).
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = mask.reshape(-1, 1, 1)  # d -> d 1 1
    x = x.unsqueeze(2)  # b c h w -> b c 1 h w

    bit_repr = ((x & mask) != 0).float()  # b c d h w
    b, c, d, h, w = bit_repr.shape
    bit_repr = bit_repr.reshape(b, c * d, h, w)  # b c d h w -> b (c d) h w
    # Map {0, 1} -> {-1, 1}.
    return bit_repr * 2 - 1


def bits_to_decimal(x, bits=BITS):
    """Inverse of ``decimal_to_bits``: bit tensor in {-1, 1} back to a [0, 1] image."""
    device = x.device

    # Threshold back to {0, 1} bits.
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = mask.reshape(-1, 1, 1)  # d -> d 1 1
    b, cd, h, w = x.shape
    # b (c d) h w -> b c d h w. The original hard-coded d=8 here; use `bits`
    # so the `bits` parameter actually takes effect for non-default values.
    x = x.reshape(b, cd // bits, bits, h, w)
    dec = (x * mask).sum(dim=2)  # reduce d away
    return (dec / 255).clamp(0.0, 1.0)
def A (self : List[Any] , __A : torch.FloatTensor , __A : int , __A : torch.FloatTensor , __A : float = 0.0 , __A : bool = True , __A : Tuple=None , __A : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
UpperCAmelCase_ = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
UpperCAmelCase_ = self.alphas_cumprod[timestep]
UpperCAmelCase_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
UpperCAmelCase_ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
UpperCAmelCase_ = self.bit_scale
if self.config.clip_sample:
UpperCAmelCase_ = torch.clamp(__A , -scale , __A )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
UpperCAmelCase_ = self._get_variance(__A , __A )
UpperCAmelCase_ = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
UpperCAmelCase_ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase_ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase_ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
UpperCAmelCase_ = model_output.device if torch.is_tensor(__A ) else '''cpu'''
UpperCAmelCase_ = torch.randn(model_output.shape , dtype=model_output.dtype , generator=__A ).to(__A )
UpperCAmelCase_ = self._get_variance(__A , __A ) ** 0.5 * eta * noise
UpperCAmelCase_ = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
def A (self : Optional[int] , __A : torch.FloatTensor , __A : int , __A : torch.FloatTensor , __A : int="epsilon" , __A : Optional[Any]=None , __A : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCAmelCase_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
UpperCAmelCase_ , UpperCAmelCase_ = torch.split(__A , sample.shape[1] , dim=1 )
else:
UpperCAmelCase_ = None
# 1. compute alphas, betas
UpperCAmelCase_ = self.alphas_cumprod[t]
UpperCAmelCase_ = self.alphas_cumprod[t - 1] if t > 0 else self.one
UpperCAmelCase_ = 1 - alpha_prod_t
UpperCAmelCase_ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
UpperCAmelCase_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
UpperCAmelCase_ = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
UpperCAmelCase_ = self.bit_scale
if self.config.clip_sample:
UpperCAmelCase_ = torch.clamp(__A , -scale , __A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
UpperCAmelCase_ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCAmelCase_ = 0
if t > 0:
UpperCAmelCase_ = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__A ).to(model_output.device )
UpperCAmelCase_ = (self._get_variance(__A , predicted_variance=__A ) ** 0.5) * noise
UpperCAmelCase_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__A , pred_original_sample=__A )
class __snake_case(DiffusionPipeline):
    """Bit Diffusion pipeline: runs the diffusion process on a binary (bit)
    representation of images instead of raw pixel values."""

    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Monkey-patch the scheduler's step with the bit-aware variant that
        # matches its type.
        # NOTE(review): `self.scheduler` is accessed before `register_modules`
        # (mirrors the upstream community pipeline) — verify it resolves here.
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample `batch_size` images of (height, width) and return them
        (PIL images when ``output_type == "pil"``, tensors otherwise)."""
        # Start from Gaussian latents mapped into the {-1, 1} bit space.
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        # Decode the final bit tensor back to a [0, 1] image.
        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 51
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger; referenced by DeformableDetrConfig.__init__ below.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __lowercase(PretrainedConfig):
    """Configuration class for Deformable DETR models.

    Holds the architecture hyper-parameters, Hungarian-matcher costs and loss
    coefficients; defaults reproduce the SenseTime/deformable-detr checkpoint.
    """

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 318
|
# Table of runtime/dev dependency version pins, keyed by package name.
# NOTE(review): the mangled name `snake_case_` suggests this was originally a
# named table (e.g. `deps`) consumed by a dependency-version check — verify
# against its callers before renaming.
snake_case_ : Dict = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 51
| 0
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """Resize images so their shorter edge matches a (randomly chosen) target
    length, capping the longer edge at ``max_size``."""

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length: ``[min, max]`` range for the target short-edge length.
            max_size: maximum allowed longer-edge length after resizing.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Shrink further if the longer edge would exceed max_size.
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                # uint8 numpy image: resize through PIL.
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                # float tensor: hw(c) -> nchw, interpolate, back to chw.
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    """Turn raw images into a normalized, resized and padded batch tensor
    suitable for the FRCNN backbone, tracking per-image rescale factors."""

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Pad every image (bottom/right) to the max H/W in the list and stack.

        Returns the stacked batch and a tensor of the original (h, w) sizes.
        """
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                # Replace each entry in place with a float tensor on the device,
                # decoding file paths / arrays via img_tensorize when needed.
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _A ( _lowercase , _lowercase ) -> List[Any]:
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _A ( _lowercase , _lowercase ) -> Tuple:
"""simple docstring"""
assert torch.isfinite(__A ).all(), "Box tensor contains infinite or NaN!"
__UpperCamelCase, __UpperCamelCase = box_size
tensor[:, 0].clamp_(min=0 , max=__A )
tensor[:, 1].clamp_(min=0 , max=__A )
tensor[:, 2].clamp_(min=0 , max=__A )
tensor[:, 3].clamp_(min=0 , max=__A )
| 310
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Fetch the raw bytes of an Instagram video/IGTV post via downloadgram.

    Resolves the direct video URL through the downloadgram API, then downloads
    and returns the video content.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    # Prompt for a post URL and save the video under a timestamped filename.
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 51
| 0
|
"""simple docstring"""
def is_sum_subset(arr, required_sum) -> bool:
    """Return True if some subset of ``arr`` sums exactly to ``required_sum``.

    Classic O(len(arr) * required_sum) subset-sum dynamic programming;
    ``arr`` is expected to hold non-negative integers.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i items sums to j.
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 241
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): both bindings below share the mangled name `snake_case_`, so
# the second assignment clobbers the first; neither is referenced in the
# visible code — restore the original names before relying on them.
snake_case_ : Any = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (pretrained config archive map).
snake_case_ : Optional[Any] = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __snake_case(PretrainedConfig):
    """Configuration class for Falcon models.

    Defaults reproduce the tiiuae/falcon-40b architecture.
    """

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Default to multi-head attention when no KV-head count is given.
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Per-head hidden dimension."""
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        """Rotary position embeddings are used whenever ALiBi is disabled."""
        return not self.alibi
| 51
| 0
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds tiny OpenLlama configs/inputs and runs shape/consistency checks
    on behalf of the unittest-based model test class."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that cached (past_key_values) decoding matches full decoding."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + OpenLlama-specific model tests.

    NOTE(review): the mangled source named the class ``__A`` with bases
    ``(A, A, A, unittest.TestCase)`` and every method ``a__`` (each definition
    shadowing the previous one, and all locals bound to ``_a`` while real names
    were read). Names are restored from the standard transformers test layout —
    confirm the mixin base names against this file's imports.
    """

    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        # NOTE(review): config_class reconstructed — confirm it is OpenLlamaConfig.
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 211
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Heuristic selector: 1 for Manhattan, 0 for Euclidean (see Node.calculate_heuristic).
# The mangled source bound all four constants below to the same throwaway name,
# leaving HEURISTIC/grid/delta/TPosition — which the classes below read — undefined.
HEURISTIC = 0

# Search space: 0 cells are free, 1 cells are obstacles.
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A (y, x) coordinate pair, used throughout the search classes.
TPosition = tuple[int, int]
class Node:
    """A* search node; ``f_cost = g_cost (from start) + h_cost (heuristic to goal)``.

    The mangled source assigned every attribute to a throwaway local in
    ``__init__``, so ``calculate_heuristic``/``__lt__`` would raise
    AttributeError; attribute assignments and the heuristic's ``dx``/``dy``
    locals are restored here. The class is also renamed back to ``Node``,
    which is the name the AStar class below instantiates.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # stored (y, x), matching TPosition
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Heuristic distance to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by total cost lets the open list be sorted with list.sort().
        return self.f_cost < other.f_cost
class AStar:
    """A* path search over the module-level ``grid``.

    Restored from the mangled source: method names (``search`` is what the
    ``__main__`` block calls, ``get_successors``/``retrace_path`` are what
    ``BidirectionalAStar`` calls), instance-attribute assignments, and the
    ``None`` parent for the start/target nodes.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Nodes take (x, y) positionally while TPosition is (y, x), hence the [1]/[0] swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A* and return the path from start to target, or ``[start]`` if unreachable."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (lowest f_cost first)
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                # NOTE(review): Node defines no __eq__, so these membership tests
                # compare by identity — preserved as-is from the original algorithm.
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, obstacle-free neighbour nodes of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk the parent chain from ``node`` back to the start and return it start-first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: one search from start to goal and one from goal to start,
    meeting in the middle.

    Restored from the mangled source: the class name used by ``__main__``, the
    forward/backward ``AStar`` arguments (goal-to-start for the backward
    search), and all attribute/local bindings.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run both searches in lockstep; return the joined path, or ``[start]`` if none."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each frontier aims at the other search's current node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path (dropping the shared meeting node)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the timed section only constructs BidirectionalAStar and never
    # calls bidir_astar.search() — mirrored from the original; confirm if intended.
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 51
| 0
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    """Builds small RegNet configs/inputs for the TF model tests.

    Restored from the mangled source: the class name used by the test class's
    ``setUp``, real parameter names in place of the duplicate ``_A`` arguments
    (a SyntaxError), and the instance-attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],  # noqa: mutable defaults kept for parity with the upstream tester
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` with random image inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """TF RegNet model tests.

    Here we also overwrite some of the tests of test_modeling_common.py, as
    RegNet does not use input_ids, inputs_embeds, attention_mask and seq_length.

    NOTE(review): the mangled source named every method ``__UpperCamelCase``
    (each shadowing the previous) and bound all locals to one throwaway name;
    names restored from the standard transformers TF test layout — confirm the
    mixin base names against this file's imports.
    """

    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Renamed back from the mangled ``_snake_case``: the integration test calls
    ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check of a pretrained TF RegNet classifier.

    Restored from the mangled source: the ``default_image_processor`` property
    name (read via ``self.default_image_processor``) and real local names.
    """

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 18
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds small MaskFormer configs/inputs for the model tests.

    Restored from the mangled source: the class name used by the test class's
    ``setUp``, real parameter names in place of the duplicate mangled arguments,
    and all instance-attribute/local bindings.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, pixel_mask, mask_labels, class_labels)`` on torch_device."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """MaskFormer model tests (common + head-specific).

    NOTE(review): the mangled source named the class ``__snake_case`` with
    bases ``(a, a, unittest.TestCase)`` and every method ``lowerCamelCase``
    (each shadowing the previous); names restored from the standard
    transformers test layout — confirm the mixin base names against this
    file's imports.
    """

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
snake_case_ : Dict = 1e-4  # absolute tolerance used by the integration tests below


def prepare_img():
    """Load the standard COCO fixture image used by the slow integration tests.

    The original definition was named ``A`` and returned the undefined name ``image`` while
    assigning the opened file to a throwaway variable; every call site in this file uses
    ``prepare_img()``, so the definition is renamed and the local variable fixed to match.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class __snake_case ( unittest.TestCase ):
    """Slow integration tests that run real MaskFormer checkpoints end to end.

    NOTE(review): throughout this class the assignment targets read ``UpperCAmelCase_`` while
    the following lines reference meaningful names (``model``, ``inputs``,
    ``image_processor`` ...) — the left-hand sides look machine-garbled; verify against the
    upstream test file before relying on this block.
    """

    @cached_property
    def lowerCamelCase ( self : List[str]):
        """Image processor for the swin-small COCO checkpoint (None without vision deps)."""
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''')
            if is_vision_available()
            else None
        )

    def lowerCamelCase ( self : List[Any]):
        """Forward the base model on the COCO fixture image and compare hidden-state slices."""
        UpperCAmelCase_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(_snake_case)
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        # expected slice of the backbone (encoder) last hidden state
        UpperCAmelCase_ = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # expected slice of the pixel decoder last hidden state
        UpperCAmelCase_ = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # expected slice of the transformer decoder last hidden state
        UpperCAmelCase_ = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]]).to(_snake_case)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _snake_case , atol=_snake_case))

    def lowerCamelCase ( self : List[str]):
        """Forward the instance-segmentation head and check mask/class logits against references."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        # masks_queries_logits
        UpperCAmelCase_ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCAmelCase_ = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # class_queries_logits
        UpperCAmelCase_ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        UpperCAmelCase_ = torch.tensor(
            [
                [1.65_12e00, -5.25_72e00, -3.35_19e00],
                [3.61_69e-02, -5.90_25e00, -2.93_13e00],
                [1.07_66e-04, -7.76_30e00, -5.12_63e00],
            ]).to(_snake_case)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))

    def lowerCamelCase ( self : Optional[Any]):
        """Same as above but for the resnet101 COCO-stuff checkpoint."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = prepare_img()
        UpperCAmelCase_ = image_processor(_snake_case , return_tensors='''pt''').to(_snake_case)
        UpperCAmelCase_ = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(_snake_case , (1, 3, 800, 1088))
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        # masks_queries_logits
        UpperCAmelCase_ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCAmelCase_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        UpperCAmelCase_ = torch.tensor(_snake_case).to(_snake_case)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _snake_case , atol=_snake_case))
        # class_queries_logits
        UpperCAmelCase_ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        UpperCAmelCase_ = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]]).to(_snake_case)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _snake_case , atol=_snake_case))

    def lowerCamelCase ( self : Tuple):
        """Run a forward pass with segmentation maps / labels and check that a loss is produced."""
        UpperCAmelCase_ = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''')
            .to(_snake_case)
            .eval()
        )
        UpperCAmelCase_ = self.default_image_processor
        UpperCAmelCase_ = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors='''pt''' , )
        UpperCAmelCase_ = inputs['''pixel_values'''].to(_snake_case)
        UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''mask_labels''']]
        UpperCAmelCase_ = [el.to(_snake_case) for el in inputs['''class_labels''']]
        with torch.no_grad():
            UpperCAmelCase_ = model(**_snake_case)
        self.assertTrue(outputs.loss is not None)
| 51
| 0
|
"""simple docstring"""
def _snake_case ( lowercase__ : int = 1 , lowercase__ : int = 1_0_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = 1
lowerCAmelCase_ :Optional[int] = 0
for divide_by_number in range(__A , digit + 1 ):
lowerCAmelCase_ :List[str] = []
lowerCAmelCase_ :Optional[Any] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__A ):
lowerCAmelCase_ :Tuple = len(__A )
lowerCAmelCase_ :Union[str, Any] = divide_by_number
else:
has_been_divided.append(__A )
lowerCAmelCase_ :Union[str, Any] = now_divide * 1_0 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy ``weight`` (and optionally ``bias``) into ``torch_layer`` after checking shapes.

    The original definition was named ``A`` with several parameters all called ``__A``
    (a ``SyntaxError``); every call site in this file uses ``set_param``, so the intended
    signature is restored.

    Raises:
        AssertionError: if the tensor shapes do not match the layer's parameters.
    """
    assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH self-attention weights (query_key, value, output dense) into ``torch_layer``.

    (Originally ``def A`` with three parameters all named ``__A`` — a ``SyntaxError``;
    the signature used by the in-file call sites is restored.)
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )

    # trax stores (heads, ...) layouts — fold the head dimension into hidden_size
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local self-attention weights (query, key, value, output dense) into ``torch_layer``.

    (Originally ``def A`` with three parameters all named ``__A`` — a ``SyntaxError``;
    the signature used by the in-file call sites is restored.)
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )

    # fold the head dimension into hidden_size, matching the lsh variant above
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (layernorms, attention, feed-forward) into ``torch_block``.

    (Originally ``def A`` with three parameters all named ``__A`` — a ``SyntaxError``;
    the signature used by the in-file call sites is restored.)
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    # lsh attention stores 3 weight groups, local attention stores 4
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a full trax Reformer weight tree into ``torch_model`` (embeddings, blocks, head).

    (Originally ``def A`` with three parameters all named ``__A`` — a ``SyntaxError``;
    the signature used by the in-file call sites is restored.)
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    # axial position embeddings arrive as a tuple of per-axis weights
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    # trax stores 4 weight groups per layer
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ``ReformerModelWithLMHead`` from ``config_file``, load trax weights from the
    pickle at ``trax_model_pkl_path``, and save the state dict to ``pytorch_dump_path``.

    (Originally ``def A`` with three parameters all named ``__A`` — a ``SyntaxError``;
    the ``__main__`` block below calls ``convert_trax_checkpoint_to_pytorch``, so the
    intended signature is restored.)
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    # NOTE(review): pickle.load on an arbitrary checkpoint executes embedded code — only run
    # this conversion on checkpoints from a trusted source.
    with open(trax_model_pkl_path , '''rb''' ) as f:
        model_weights = pickle.load(f )['''weights''']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case_ : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 51
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """Entry point for the ``diffusers-cli`` tool: parse args and dispatch to a sub-command.

    The original definition was named ``snake_case_`` while the ``__main__`` guard below
    calls ``main()``, and the body referenced the undefined name ``__A``; both are restored.
    """
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , '''func''' ):
        # no sub-command given: show usage and exit non-zero
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()


if __name__ == "__main__":
    main()
| 23
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __snake_case ( a , a , a , unittest.TestCase ):
    """Fast pipeline tests for StableDiffusionControlNetImgaImgPipeline with one ControlNet."""
    # pipeline class and parameter sets consumed by the shared tester mixins
    UpperCAmelCase__ : List[Any] = StableDiffusionControlNetImgaImgPipeline
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCAmelCase__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
    UpperCAmelCase__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def lowerCamelCase ( self : int):
        """Build tiny, seeded pipeline components (unet, controlnet, scheduler, vae, text encoder)."""
        torch.manual_seed(0)
        UpperCAmelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0)
        UpperCAmelCase_ = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
        torch.manual_seed(0)
        UpperCAmelCase_ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        UpperCAmelCase_ = CLIPTextModel(_snake_case)
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        # NOTE(review): assignment targets read ``UpperCAmelCase_`` while the dict below uses
        # ``unet``/``controlnet``/... — machine-garbled names; verify against upstream.
        UpperCAmelCase_ = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any , _snake_case : Dict=0):
        """Build deterministic dummy inputs: a random control image plus a 64x64 init image."""
        if str(_snake_case).startswith('''mps'''):
            UpperCAmelCase_ = torch.manual_seed(_snake_case)
        else:
            UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , )
        UpperCAmelCase_ = floats_tensor(control_image.shape , rng=random.Random(_snake_case)).to(_snake_case)
        UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64))
        UpperCAmelCase_ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def lowerCamelCase ( self : Any):
        """Attention slicing must match the default forward pass within tolerance."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCamelCase ( self : Any):
        """xFormers attention must match the default forward pass (CUDA + xformers only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def lowerCamelCase ( self : Optional[Any]):
        """Batched single-sample inference must match unbatched inference."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class __snake_case ( a , a , unittest.TestCase ):
    """Fast pipeline tests driving the img2img pipeline through a two-ControlNet MultiControlNetModel."""
    UpperCAmelCase__ : str = StableDiffusionControlNetImgaImgPipeline
    UpperCAmelCase__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCAmelCase__ : str = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def lowerCamelCase ( self : str):
        """Build tiny pipeline components with two ControlNets wrapped in MultiControlNetModel."""
        torch.manual_seed(0)
        UpperCAmelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        def init_weights(_snake_case : Optional[int]):
            # NOTE(review): the body references ``m`` while the parameter is ``_snake_case``, the
            # class name reads ``Convad`` (presumably Conv2d), and ``torch.nn.init.normal`` is the
            # deprecated spelling of ``normal_`` — looks machine-garbled; verify upstream.
            if isinstance(_snake_case , torch.nn.Convad):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)
        UpperCAmelCase_ = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(_snake_case)
        torch.manual_seed(0)
        UpperCAmelCase_ = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(_snake_case)
        torch.manual_seed(0)
        UpperCAmelCase_ = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
        torch.manual_seed(0)
        UpperCAmelCase_ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        UpperCAmelCase_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        UpperCAmelCase_ = CLIPTextModel(_snake_case)
        UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        UpperCAmelCase_ = MultiControlNetModel([controlneta, controlneta])
        UpperCAmelCase_ = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def lowerCamelCase ( self : int , _snake_case : Union[str, Any] , _snake_case : str=0):
        """Build deterministic dummy inputs: two random control images plus a 64x64 init image."""
        if str(_snake_case).startswith('''mps'''):
            UpperCAmelCase_ = torch.manual_seed(_snake_case)
        else:
            UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
        UpperCAmelCase_ = 2
        UpperCAmelCase_ = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , ),
        ]
        UpperCAmelCase_ = floats_tensor(control_image[0].shape , rng=random.Random(_snake_case)).to(_snake_case)
        UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
        UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64))
        UpperCAmelCase_ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def lowerCamelCase ( self : Optional[Any]):
        """Vary control_guidance_start/end (scalar and per-controlnet lists) and check the outputs differ."""
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**_snake_case)
        pipe.to(_snake_case)
        UpperCAmelCase_ = 1_0.0
        UpperCAmelCase_ = 4
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case)[0]
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
        UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
        UpperCAmelCase_ = steps
        UpperCAmelCase_ = scale
        UpperCAmelCase_ = pipe(**_snake_case , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
    def lowerCamelCase ( self : Dict):
        """Attention slicing must match the default forward pass within tolerance."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCamelCase ( self : int):
        """xFormers attention must match the default forward pass (CUDA + xformers only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def lowerCamelCase ( self : int):
        """Batched single-sample inference must match unbatched inference."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def lowerCamelCase ( self : Optional[int]):
        """save_pretrained is not implemented for Multi-ControlNet; a NotImplementedError is tolerated."""
        UpperCAmelCase_ = self.get_dummy_components()
        UpperCAmelCase_ = self.pipeline_class(**_snake_case)
        pipe.to(_snake_case)
        pipe.set_progress_bar_config(disable=_snake_case)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(_snake_case)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration test for canny-conditioned img2img generation."""
    def lowerCamelCase ( self : Optional[int]):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase ( self : Optional[int]):
        """Generate with the canny ControlNet and compare against a stored reference image."""
        # NOTE(review): assignment targets read ``UpperCAmelCase_`` while later lines reference
        # ``pipe``/``output``/``image`` — machine-garbled names; verify against upstream.
        UpperCAmelCase_ = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        UpperCAmelCase_ = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case , controlnet=_snake_case)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=_snake_case)
        UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
        UpperCAmelCase_ = '''evil space-punk bird'''
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''').resize((512, 512))
        UpperCAmelCase_ = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''').resize((512, 512))
        UpperCAmelCase_ = pipe(
            _snake_case , _snake_case , control_image=_snake_case , generator=_snake_case , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        UpperCAmelCase_ = output.images[0]
        assert image.shape == (512, 512, 3)
        UpperCAmelCase_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''')
        assert np.abs(expected_image - image).max() < 9e-2
| 51
| 0
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_UpperCAmelCase : Optional[int] = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase ( __UpperCamelCase ):
    """Configuration for MaskFormer: a backbone config, a DETR-style decoder config, and the
    Hungarian-matcher / loss hyper-parameters.

    NOTE(review): assignment targets throughout read ``lowerCamelCase__`` while later lines
    reference ``backbone_config``/``decoder_config``/... and several calls take the undefined
    name ``_snake_case`` — the names look machine-garbled; verify against the upstream
    ``configuration_maskformer.py`` before relying on this block.
    """
    # model_type identifier, attribute alias map, and supported backbone/decoder families
    UpperCAmelCase__ = '''maskformer'''
    UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
    UpperCAmelCase__ = ['''resnet''', '''swin''']
    UpperCAmelCase__ = ['''detr''']
    def __init__( self : Optional[int] , UpperCAmelCase : int = 256 , UpperCAmelCase : int = 256 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 20.0 , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : List[str] , ) -> Optional[Any]:
        """Validate/normalise the backbone and decoder sub-configs, then store all hyper-parameters."""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            lowerCamelCase__ : Dict = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(_snake_case , _snake_case ):
            # a plain dict was passed: resolve the concrete config class from its model_type
            lowerCamelCase__ : List[Any] = backbone_config.pop('model_type' )
            lowerCamelCase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
            lowerCamelCase__ : Optional[int] = config_class.from_dict(_snake_case )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                F"""Supported model types: {",".join(self.backbones_supported )}""" )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            lowerCamelCase__ : Any = DetrConfig()
        else:
            # verify that the decoder is supported
            lowerCamelCase__ : List[str] = (
                decoder_config.pop('model_type' ) if isinstance(_snake_case , _snake_case ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    F"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    F""" {",".join(self.decoders_supported )}""" )
            if isinstance(_snake_case , _snake_case ):
                lowerCamelCase__ : Any = CONFIG_MAPPING[decoder_type]
                lowerCamelCase__ : int = config_class.from_dict(_snake_case )
        lowerCamelCase__ : List[Any] = backbone_config
        lowerCamelCase__ : List[str] = decoder_config
        # main feature dimension for the model
        lowerCamelCase__ : List[str] = fpn_feature_size
        lowerCamelCase__ : Dict = mask_feature_size
        # initializer
        lowerCamelCase__ : List[str] = init_std
        lowerCamelCase__ : Optional[Any] = init_xavier_std
        # Hungarian matcher && loss
        lowerCamelCase__ : Tuple = cross_entropy_weight
        lowerCamelCase__ : Dict = dice_weight
        lowerCamelCase__ : Optional[Any] = mask_weight
        lowerCamelCase__ : str = use_auxiliary_loss
        lowerCamelCase__ : int = no_object_weight
        lowerCamelCase__ : Tuple = output_auxiliary_logits
        lowerCamelCase__ : Optional[int] = self.decoder_config.encoder_attention_heads
        lowerCamelCase__ : Tuple = self.decoder_config.num_hidden_layers
        super().__init__(**_snake_case )
    @classmethod
    def A_ ( cls : List[Any] , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : PretrainedConfig , **UpperCAmelCase : Any ) -> str:
        """Alternate constructor from separate backbone and decoder configs."""
        return cls(
            backbone_config=_snake_case , decoder_config=_snake_case , **_snake_case , )
    def A_ ( self : Dict ) -> Optional[Any]:
        """Serialize this config (with nested backbone/decoder configs) to a plain dict."""
        lowerCamelCase__ : Tuple = copy.deepcopy(self.__dict__ )
        lowerCamelCase__ : List[Any] = self.backbone_config.to_dict()
        lowerCamelCase__ : Tuple = self.decoder_config.to_dict()
        lowerCamelCase__ : Tuple = self.__class__.model_type
        return output
| 50
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case_ : Tuple = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Return a decorator that wraps a benchmark function for eager or graph (optionally XLA) mode.

    The original definition was named ``A`` with both parameters called ``__A`` (a
    ``SyntaxError``) and referenced the undefined name ``__A`` in the body; the intended
    signature from the upstream TensorFlow benchmark module is restored.

    Raises:
        ValueError: if eager mode is requested together with XLA (they are incompatible here).
    """
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Build a random ``(batch_size, sequence_length)`` int32 tensor of token ids in ``[0, vocab_size)``.

    The original definition was named ``A`` with all three parameters called ``__A`` (a
    ``SyntaxError``) and the dtype read ``tf.intaa`` (garbled ``tf.int32``); both are restored.
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class __snake_case ( a ):
UpperCAmelCase__ : TensorFlowBenchmarkArguments
UpperCAmelCase__ : PretrainedConfig
UpperCAmelCase__ : str = "TensorFlow"
    @property
    def lowerCamelCase ( self : List[str]):
        """The installed TensorFlow version string."""
        return tf.__version__
    def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : int , _snake_case : int):
        """Measure inference speed for one model at the given batch size / sequence length."""
        UpperCAmelCase_ = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
        # NOTE(review): assignment targets read ``UpperCAmelCase_`` while the lines use
        # ``strategy``/``_inference`` — machine-garbled names; verify against upstream.
        UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
        return self._measure_speed(_inference)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_speed(_train)
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_inference_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_inference)
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _snake_case)
UpperCAmelCase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''')
UpperCAmelCase_ = self._prepare_train_func(_snake_case , _snake_case , _snake_case)
return self._measure_memory(_train)
def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_forward():
return model(_snake_case , decoder_input_ids=_snake_case , training=_snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_forward():
return model(_snake_case , training=_snake_case)
UpperCAmelCase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''')
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''')
UpperCAmelCase_ = (
hasattr(_snake_case , '''architectures''')
and isinstance(config.architectures , _snake_case)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
UpperCAmelCase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
UpperCAmelCase_ = __import__('''transformers''' , fromlist=[model_class])
UpperCAmelCase_ = getattr(_snake_case , _snake_case)
UpperCAmelCase_ = model_cls(_snake_case)
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''')
else:
UpperCAmelCase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_snake_case)
# encoder-decoder has vocab size saved differently
UpperCAmelCase_ = config.vocab_size if hasattr(_snake_case , '''vocab_size''') else config.encoder.vocab_size
UpperCAmelCase_ = random_input_ids(_snake_case , _snake_case , _snake_case)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_train():
UpperCAmelCase_ = model(_snake_case , decoder_input_ids=_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_train():
UpperCAmelCase_ = model(_snake_case , labels=_snake_case , training=_snake_case)[0]
UpperCAmelCase_ = tf.gradients(_snake_case , model.trainable_variables)
return gradients
UpperCAmelCase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase ( self : Any , _snake_case : Optional[Any]):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''')
timeit.repeat(_snake_case , repeat=1 , number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
UpperCAmelCase_ = timeit.repeat(
_snake_case , repeat=self.args.repeat , number=10 , )
return min(_snake_case) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
def lowerCamelCase ( self : Dict , _snake_case : Callable[[], None]):
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''')
UpperCAmelCase_ = start_memory_tracing('''transformers''')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''')
UpperCAmelCase_ = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''')
# init nvml
nvml.nvmlInit()
func()
UpperCAmelCase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
UpperCAmelCase_ = nvml.nvmlDeviceGetMemoryInfo(_snake_case)
UpperCAmelCase_ = meminfo.used
UpperCAmelCase_ = Memory(_snake_case)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''')
UpperCAmelCase_ = None
else:
UpperCAmelCase_ = measure_peak_memory_cpu(_snake_case)
UpperCAmelCase_ = Memory(_snake_case) if isinstance(_snake_case , _snake_case) else memory_bytes
if self.args.trace_memory_line_by_line:
UpperCAmelCase_ = stop_memory_tracing(_snake_case)
if memory is None:
UpperCAmelCase_ = summary.total
else:
UpperCAmelCase_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""")
return "N/A", None
| 51
| 0
|
def lowerCAmelCase_(stra: str, strb: str) -> float:
    """Return the Jaro-Winkler similarity of two strings, in [0, 1].

    Jaro score (matched characters and transpositions) plus a bonus for a
    common prefix of up to four characters. The original declared both
    parameters as ``_lowercase`` (a SyntaxError) and referenced undefined
    locals; this reconstruction keeps the classic algorithm.

    >>> round(lowerCAmelCase_("martha", "marhta"), 10)
    0.9611111111
    """

    def get_matched_characters(_stra: str, _strb: str) -> str:
        # Characters of _stra that also occur in _strb within the match window.
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, ch in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if ch in _strb[left:right]:
                matched.append(ch)
                # blank out the matched character so it is not reused
                _strb = f"{_strb[0:_strb.index(ch)]} {_strb[_strb.index(ch) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transpositions: matched characters that appear in a different order
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called the undefined name `jaro_winkler`;
    # the similarity function in this module is defined as `lowerCAmelCase_`.
    print(lowerCAmelCase_("hello", "world"))
| 170
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Placeholder so that references to an image class below do not fail when
    # Pillow is unavailable; the static method swallows any call.
    class __snake_case :
        @staticmethod
        def lowerCamelCase ( *_snake_case : Optional[int] , **_snake_case : int):
            """No-op stand-in used when the vision extra is not installed."""
            pass
def A(image) -> str:
    """Return the hex MD5 digest of an image's raw bytes (used to fingerprint outputs).

    Accepts any object exposing ``tobytes()`` (e.g. a PIL Image). The original
    named the parameter ``__A`` while the body read ``image``, and called the
    non-existent ``hashlib.mda``; both are fixed here.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase ):
    """Pipeline tests for depth estimation: schema smoke tests plus a slow DPT integration test.

    NOTE(review): parameter names are machine-mangled — two methods repeat
    `_snake_case` in their signatures (a SyntaxError) and bodies read names
    like `depth_estimator` that are never bound; restore the originals first.
    """

    UpperCAmelCase__ : Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[str]):
        """Build the pipeline under test and return it with sample image paths."""
        UpperCAmelCase_ = DepthEstimationPipeline(model=_snake_case , image_processor=_snake_case)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def lowerCamelCase ( self : str , _snake_case : Optional[int] , _snake_case : List[str]):
        """Run the pipeline on single and batched inputs and check the output schema."""
        UpperCAmelCase_ = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)} , _snake_case)
        import datasets

        UpperCAmelCase_ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''')
        UpperCAmelCase_ = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ])
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ] , _snake_case , )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''')
    def lowerCamelCase ( self : Union[str, Any]):
        """TF variant placeholder; skipped because TF has no depth-estimation pipeline."""
        pass

    @slow
    @require_torch
    def lowerCamelCase ( self : List[str]):
        """Integration test: run Intel/dpt-large and pin predicted-depth statistics."""
        UpperCAmelCase_ = '''Intel/dpt-large'''
        UpperCAmelCase_ = pipeline('''depth-estimation''' , model=_snake_case)
        UpperCAmelCase_ = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        UpperCAmelCase_ = hashimage(outputs['''depth'''])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()) , 2_9.3_0_4)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()) , 2.6_6_2)

    @require_torch
    def lowerCamelCase ( self : Optional[Any]):
        """Tiny-model smoke test; skipped because no tiny GLPN/DPT checkpoint exists."""
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 51
| 0
|
def A_ ( snake_case : float ) -> float:
    """Evaluate the target function f(x) = 10 - x*x whose root the bisection seeks."""
    squared = snake_case * snake_case
    return 10 - squared
def A_(a: float, b: float, equation=None) -> float:
    """Approximate a root of *equation* on [a, b] by bisection.

    The original declared both interval endpoints with the same parameter name
    (a SyntaxError) and referenced an undefined global ``equation``; the target
    function is now an optional callable, defaulting to the quadratic
    f(x) = 10 - x*x used elsewhere in this module (a backward-compatible
    generalization).

    Args:
        a, b: interval endpoints; equation(a) and equation(b) must differ in sign.
        equation: callable mapping float -> float (default: 10 - x*x).

    Returns:
        The midpoint approximation of the root, accurate to roughly 0.01.

    Raises:
        ValueError: if equation(a) and equation(b) have the same sign.
    """
    if equation is None:
        def equation(x: float) -> float:
            return 10 - x * x

    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        f_c = equation(c)
        # Check if middle point is root
        if f_c == 0.0:
            break
        # Decide the side to repeat the steps
        if f_c * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called the undefined name `bisection`;
    # the bisection routine in this module is defined as `A_`.
    print(A_(-2, 5))
    print(A_(0, 6))
| 328
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# NOTE(review): the variable names in this lazy-init module look machine-mangled —
# the import structure and every optional-backend list below are all bound to
# `snake_case_` (each assignment clobbering the last), yet `_LazyModule` at the
# bottom is called with the undefined name `_import_structure`.
snake_case_ : int = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fast tokenizer requires the `tokenizers` backend
    snake_case_ : int = ["DebertaTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch model classes
    snake_case_ : List[str] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow model classes
    snake_case_ : Any = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
    # direct imports for static type checkers only
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy that imports submodules on access
    snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
'''simple docstring'''
def UpperCAmelCase__(input_a: int, input_b: int) -> int:
    """Logical OR gate: return 1 if at least one input is 1, else 0.

    The original declared both parameters with the same name (a SyntaxError)
    and consequently only tested one input; both inputs are considered here.
    """
    return int((input_a, input_b).count(1) != 0)
def UpperCAmelCase__ ( ) -> None:
    """Exercise the OR-gate truth table.

    NOTE(review): `or_gate` is not defined in this module — the gate function is
    named `UpperCAmelCase__`, which this very definition shadows; calling this
    raises NameError until the names are reconciled.
    """
    assert or_gate(0, 0 ) == 0
    assert or_gate(0, 1 ) == 1
    assert or_gate(1, 0 ) == 1
    assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
    # NOTE(review): `or_gate` is not defined in this module (the gate function is
    # named `UpperCAmelCase__`); running this block raises NameError as written.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 162
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# NOTE(review): as in the other lazy-init modules here, every structure is bound
# to `snake_case_` (each assignment clobbering the last) while `_LazyModule` is
# called with the undefined name `_import_structure`.
snake_case_ : Union[str, Any] = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fast tokenizer requires the `tokenizers` backend
    snake_case_ : Dict = ["GPTNeoXTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch model classes
    snake_case_ : str = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
    # direct imports for static type checkers only
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    # replace this module with a lazy proxy that imports submodules on access
    snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
'''simple docstring'''
def lowercase_(_lowercase: str) -> list[int]:
    """Compute the Z-array of *_lowercase*.

    z[i] is the length of the longest substring starting at i that is also a
    prefix of the string; z[0] is left 0 by convention. The original body
    referenced the undefined name ``__A`` and a helper ``go_next``; the
    standard algorithm is inlined here.

    >>> lowercase_("abacaba")
    [0, 0, 1, 0, 3, 0, 1]
    """
    s = _lowercase
    z_result = [0 for _ in range(len(s))]
    # [left_pointer, right_pointer] tracks the rightmost prefix-match window
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(s)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]:
            z_result[i] += 1
        # if the new match extends past the window, move the window
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def lowercase_(i: int, z_result: list[int], s: str) -> bool:
    """Return True while the Z-box starting at *i* can be extended by one character.

    The original declared all three parameters as ``_lowercase`` (a SyntaxError)
    and referenced the undefined ``__A``; the names used by the body are restored.
    """
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def lowercase_(pattern: str, input_str: str) -> int:
    """Count occurrences of *pattern* in *input_str* using the Z-function.

    The original declared both parameters as ``_lowercase`` (a SyntaxError) and
    called ``z_function``, a name this module never binds (its definitions all
    shadow one another); the Z-array computation is inlined here.

    >>> lowercase_("a", "aaa")
    3
    """

    def _z_array(s: str) -> list[int]:
        # standard Z-algorithm
        z_result = [0] * len(s)
        left, right = 0, 0
        for i in range(1, len(s)):
            if i <= right:
                z_result[i] = min(right - i + 1, z_result[i - left])
            while i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]:
                z_result[i] += 1
            if i + z_result[i] - 1 > right:
                left, right = i, i + z_result[i] - 1
        return z_result

    answer = 0
    # concatenate 'pattern' and 'input_str' and scan the combined Z-array
    for val in _z_array(pattern + input_str):
        # a Z-value at least as long as the pattern marks a full match
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 318
|
def A(list_data: list, key: int, left: int = 0, right=None) -> int:
    """Search *key* in list_data[left:right+1] from both ends, one step per call.

    Returns the index of *key*, or -1 if absent from the window. Fixes two
    defects in the original: ``right = right or len(list_data) - 1`` silently
    turned an explicit ``right=0`` into the last index (classic falsy-zero
    bug), and the recursive call referenced the undefined name ``__A``.
    """
    if right is None:
        right = len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    # shrink the window from both ends and recurse
    return A(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 51
| 0
|
from __future__ import annotations
__snake_case = 1_0
def _A ( _lowercase ) -> list[int]:
"""simple docstring"""
__UpperCamelCase = 1
__UpperCamelCase = max(__A )
while placement <= max_digit:
# declare and initialize empty buckets
__UpperCamelCase = [[] for _ in range(__A )]
# split list_of_ints between the buckets
for i in list_of_ints:
__UpperCamelCase = int((i / placement) % RADIX )
buckets[tmp].append(__A )
# put each buckets' contents into list_of_ints
__UpperCamelCase = 0
for b in range(__A ):
for i in buckets[b]:
__UpperCamelCase = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 310
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level names below are bound to `snake_case_`,
# so the pretrained-config archive map clobbers the logger.
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : str = {}
class __snake_case ( a ):
    """Configuration class holding LLaMA model hyper-parameters.

    NOTE(review): `__init__` repeats the parameter name `_snake_case` (a
    SyntaxError) and the body assigns every value to the local
    `UpperCAmelCase_` while reading names like `vocab_size` that are never
    bound — the original parameter/attribute names must be restored.
    """

    UpperCAmelCase__ : str = '''llama'''              # model_type identifier
    UpperCAmelCase__ : Dict = ['''past_key_values''']  # keys to drop at inference

    def __init__( self : str , _snake_case : List[str]=32000 , _snake_case : int=4096 , _snake_case : List[str]=11008 , _snake_case : Optional[int]=32 , _snake_case : List[Any]=32 , _snake_case : Tuple=None , _snake_case : int="silu" , _snake_case : List[Any]=2048 , _snake_case : List[str]=0.0_2 , _snake_case : Any=1e-6 , _snake_case : List[str]=True , _snake_case : Optional[Any]=0 , _snake_case : Dict=1 , _snake_case : List[Any]=2 , _snake_case : str=1 , _snake_case : Union[str, Any]=False , _snake_case : str=None , **_snake_case : List[Any] , ):
        """Store the hyper-parameters and validate the optional RoPE-scaling dict."""
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = max_position_embeddings
        UpperCAmelCase_ = hidden_size
        UpperCAmelCase_ = intermediate_size
        UpperCAmelCase_ = num_hidden_layers
        UpperCAmelCase_ = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            UpperCAmelCase_ = num_attention_heads
        UpperCAmelCase_ = num_key_value_heads
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = rms_norm_eps
        UpperCAmelCase_ = pretraining_tp
        UpperCAmelCase_ = use_cache
        UpperCAmelCase_ = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , tie_word_embeddings=_snake_case , **_snake_case , )

    def lowerCamelCase ( self : Optional[int]):
        """Validate `rope_scaling`: a 2-field dict with type in {linear, dynamic} and factor > 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , _snake_case) or len(self.rope_scaling) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
                F"""got {self.rope_scaling}""")
        UpperCAmelCase_ = self.rope_scaling.get('''type''' , _snake_case)
        UpperCAmelCase_ = self.rope_scaling.get('''factor''' , _snake_case)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(_snake_case , _snake_case) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
| 51
| 0
|
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( __UpperCamelCase ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = len(__A )
# We need to create solution object to save path.
lowerCAmelCase_ : List[Any] = [[0 for _ in range(__A )] for _ in range(__A )]
lowerCAmelCase_ : int = run_maze(__A , 0 , 0 , __A )
if solved:
print("\n".join(str(__A ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = len(__A )
# Final check point.
if i == j == (size - 1):
lowerCAmelCase_ : str = 1
return True
lowerCAmelCase_ : int = (not i < 0) and (not j < 0) # Check lower bounds
lowerCAmelCase_ : str = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
lowerCAmelCase_ : int = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
lowerCAmelCase_ : List[str] = 1
# check for directions
if (
run_maze(__A , i + 1 , __A , __A )
or run_maze(__A , __A , j + 1 , __A )
or run_maze(__A , i - 1 , __A , __A )
or run_maze(__A , __A , j - 1 , __A )
):
return True
lowerCAmelCase_ : Union[str, Any] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
snake_case_ : List[str] = logging.get_logger(__name__)  # module logger
# Map of pretrained CodeGen checkpoints to their hosted config files.
# NOTE(review): this assignment rebinds `snake_case_`, clobbering the logger above.
snake_case_ : Tuple = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __snake_case ( a ):
    """Configuration class holding CodeGen model hyper-parameters.

    NOTE(review): `__init__` repeats the parameter name `_snake_case` (a
    SyntaxError) and assigns each value to the local `UpperCAmelCase_` instead
    of `self.<name>` — the names look machine-mangled and must be restored.
    """

    UpperCAmelCase__ : str = '''codegen'''  # model_type identifier
    # Map generic config attribute names onto the GPT-style `n_*` fields.
    UpperCAmelCase__ : int = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self : Union[str, Any] , _snake_case : Union[str, Any]=50400 , _snake_case : Optional[int]=2048 , _snake_case : Union[str, Any]=2048 , _snake_case : List[str]=4096 , _snake_case : Any=28 , _snake_case : List[str]=16 , _snake_case : int=64 , _snake_case : Tuple=None , _snake_case : Dict="gelu_new" , _snake_case : Union[str, Any]=0.0 , _snake_case : Optional[Any]=0.0 , _snake_case : List[Any]=0.0 , _snake_case : List[Any]=1e-5 , _snake_case : List[str]=0.0_2 , _snake_case : Optional[Any]=True , _snake_case : int=50256 , _snake_case : Tuple=50256 , _snake_case : int=False , **_snake_case : Any , ):
        """Store the CodeGen hyper-parameters (sizes, dropouts, token ids)."""
        UpperCAmelCase_ = vocab_size
        UpperCAmelCase_ = n_ctx
        UpperCAmelCase_ = n_positions
        UpperCAmelCase_ = n_embd
        UpperCAmelCase_ = n_layer
        UpperCAmelCase_ = n_head
        UpperCAmelCase_ = n_inner
        UpperCAmelCase_ = rotary_dim
        UpperCAmelCase_ = activation_function
        UpperCAmelCase_ = resid_pdrop
        UpperCAmelCase_ = embd_pdrop
        UpperCAmelCase_ = attn_pdrop
        UpperCAmelCase_ = layer_norm_epsilon
        UpperCAmelCase_ = initializer_range
        UpperCAmelCase_ = use_cache
        UpperCAmelCase_ = bos_token_id
        UpperCAmelCase_ = eos_token_id
        super().__init__(
            bos_token_id=_snake_case , eos_token_id=_snake_case , tie_word_embeddings=_snake_case , **_snake_case)
class __snake_case ( a ):
    """ONNX export configuration for CodeGen, with past-key-values support.

    NOTE(review): several signatures below repeat the parameter name
    `_snake_case` (a SyntaxError); the intended names are visible in the bodies.
    """

    def __init__( self : Tuple , _snake_case : PretrainedConfig , _snake_case : str = "default" , _snake_case : List[PatchingSpec] = None , _snake_case : bool = False , ):
        """Initialize the ONNX config and default `pad_token_id` to 0 when missing."""
        super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case)
        if not getattr(self._config , '''pad_token_id''' , _snake_case):
            # TODO: how to do that better?
            UpperCAmelCase_ = 0

    @property
    def lowerCamelCase ( self : Optional[Any]):
        """Describe the ONNX graph inputs; adds past-key-values axes when use_past is set."""
        UpperCAmelCase_ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
        if self.use_past:
            self.fill_with_past_key_values_(_snake_case , direction='''inputs''')
            UpperCAmelCase_ = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
        return common_inputs

    @property
    def lowerCamelCase ( self : List[str]):
        """Number of transformer layers (n_layer)."""
        return self._config.n_layer

    @property
    def lowerCamelCase ( self : int):
        """Number of attention heads (n_head)."""
        return self._config.n_head

    def lowerCamelCase ( self : Optional[int] , _snake_case : PreTrainedTokenizer , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional[TensorType] = None , ):
        """Generate dummy ONNX inputs, appending zero-filled past-key-values when use_past."""
        UpperCAmelCase_ = super(_snake_case , self).generate_dummy_inputs(
            _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case)
        # We need to order the input in the way they appears in the forward()
        UpperCAmelCase_ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch

                UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                UpperCAmelCase_ = seqlen + 2
                UpperCAmelCase_ = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                UpperCAmelCase_ = [
                    (torch.zeros(_snake_case), torch.zeros(_snake_case)) for _ in range(self.num_layers)
                ]
        UpperCAmelCase_ = common_inputs['''attention_mask''']
        if self.use_past:
            UpperCAmelCase_ = ordered_inputs['''attention_mask'''].dtype
            UpperCAmelCase_ = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case)] , dim=1)
        return ordered_inputs

    @property
    def lowerCamelCase ( self : Union[str, Any]):
        """Default ONNX opset version used for export."""
        return 13
| 51
| 0
|
'''simple docstring'''
def lowerCAmelCase (__A):
"""simple docstring"""
return "".join([hex(__A)[2:].zfill(2).upper() for byte in list(__A)])
def lowerCAmelCase (__A):
"""simple docstring"""
if (len(__A) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''')
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__A) <= set('''0123456789ABCDEF'''):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''')
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16) for i in range(0 , len(__A) , 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( a , unittest.TestCase ):
    """Unit tests for the PhoBERT tokenizer (miniature BPE vocab/merges fixture).

    NOTE(review): local names look machine-mangled — bodies assign to
    `UpperCAmelCase_` while reading names like `vocab_tokens`, `tokenizer` and
    `_snake_case` that are never bound in the method; restore them before use.
    """

    UpperCAmelCase__ : Any = PhobertTokenizer
    UpperCAmelCase__ : List[str] = False

    def lowerCamelCase ( self : str):
        """Write a tiny vocab/merges fixture into the temp dir for the tests below."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_ = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
        UpperCAmelCase_ = ['''#version: 0.2''', '''l à</w>''']
        UpperCAmelCase_ = {'''unk_token''': '''<unk>'''}
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
        UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
            for token in vocab_tokens:
                fp.write(F"""{token} {vocab_tokens[token]}\n""")
        with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(_snake_case))

    def lowerCamelCase ( self : int , **_snake_case : Any):
        """Instantiate a PhobertTokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname , **_snake_case)

    def lowerCamelCase ( self : Optional[Any] , _snake_case : Union[str, Any]):
        """Return an (input, expected-output) text pair for the shared tokenizer tests."""
        UpperCAmelCase_ = '''Tôi là VinAI Research'''
        UpperCAmelCase_ = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
        return input_text, output_text

    def lowerCamelCase ( self : List[str]):
        """Tokenize a Vietnamese sentence and check tokens and ids against the fixture vocab."""
        UpperCAmelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        UpperCAmelCase_ = '''Tôi là VinAI Research'''
        UpperCAmelCase_ = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
        print(_snake_case)
        self.assertListEqual(_snake_case , _snake_case)
        UpperCAmelCase_ = tokens + [tokenizer.unk_token]
        UpperCAmelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , _snake_case)
| 51
| 0
|
from pathlib import Path
import fire
def minify(src_path: str, dest_path: str, n: int) -> None:
    """Copy every file from *src_path* into *dest_path*, keeping only its first *n* lines.

    Args:
        src_path: directory whose (flat) files are truncated.
        dest_path: output directory; created if missing.
        n: number of leading lines to keep per file (trailing whitespace stripped).
    """
    src_dir = Path(src_path)
    dest_dir = Path(dest_path)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new_lines = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_file = dest_dir.joinpath(path.name)
        print(dest_file)
        dest_file.open("w").write("\n".join(new_lines))


# Backward-compatible alias for the previous (mangled) name.
_snake_case = minify


if __name__ == "__main__":
    # BUG FIX: the guard referenced `minify`, which did not exist while the
    # function was named `_snake_case`; the function is now named `minify`.
    fire.Fire(minify)
| 18
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
# NOTE(review): both module constants are bound to the SAME name `snake_case_`,
# so the logger binding is immediately overwritten by the TypeVar — confirm
# the intended distinct names (likely `logger` and `DatasetType`).
snake_case_ : Any = logging.get_logger(__name__)
# TypeVar ranging over the two dataset flavours (map-style and iterable).
snake_case_ : Optional[int] = TypeVar("DatasetType", Dataset, IterableDataset)
def A(
    datasets: "List[DatasetType]",
    probabilities: "Optional[List[float]]" = None,
    seed: "Optional[int]" = None,
    info: "Optional[DatasetInfo]" = None,
    split: "Optional[NamedSplit]" = None,
    stopping_strategy: "Literal['first_exhausted', 'all_exhausted']" = "first_exhausted",
) -> "DatasetType":
    """Interleave several map-style or iterable datasets into a single dataset.

    BUG FIX: the original signature named every parameter ``__A`` (a SyntaxError)
    while the body read the original names; the parameter names are restored from
    those reads (``datasets``, ``stopping_strategy``, ...).

    Args:
        datasets: non-empty list of all-``Dataset`` or all-``IterableDataset`` objects.
        probabilities: optional sampling probabilities, one per dataset.
        seed: optional RNG seed for probabilistic sampling.
        info: optional ``DatasetInfo`` for the result.
        split: optional ``NamedSplit`` for the result.
        stopping_strategy: when to stop — after the first or after all datasets
            are exhausted.

    Raises:
        ValueError: on empty input, mixed/unsupported element types, or an
            invalid ``stopping_strategy``.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first element fixes the expected type for the whole list.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def A(
    dsets: "List[DatasetType]",
    info: "Optional[DatasetInfo]" = None,
    split: "Optional[NamedSplit]" = None,
    axis: int = 0,
) -> "DatasetType":
    """Concatenate several datasets along rows (``axis=0``) or columns (``axis=1``).

    BUG FIX: the original signature named every parameter ``__A`` (a SyntaxError)
    while the body read ``dsets``; parameter names restored from those reads.

    NOTE(review): the "Please pick one to interleave ..." message below is kept
    verbatim from the original even though this is the concatenate path —
    confirm whether it should say "concatenate".

    Raises:
        ValueError: on empty input or mixed/unsupported element types.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # The first element fixes the expected type for the whole list.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 51
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
# Import optional backends only when available so the module stays importable
# without torch / PIL / the vision extra.
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class A__(unittest.TestCase):
    """Holds the configuration used to exercise the image processor in the tests below.

    BUG FIX: the original ``__init__`` named every parameter ``A_`` (a SyntaxError);
    names are restored from the attribute assignments in the body. The dict-builder
    method is named ``prepare_image_processor_dict`` because that is how the test
    class below calls it.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Avoid mutable default arguments; fall back to the historical defaults.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class A__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests ViTImageProcessor against PIL, NumPy and PyTorch image inputs.

    BUG FIX: the original base class was the undefined name ``__snake_case``
    (restored to the imported ``ImageProcessingSavingTestMixin``) and every
    method was named ``__UpperCamelCase`` so later definitions silently
    replaced earlier ones; distinct names are restored below.
    """

    # Attribute read by ImageProcessingSavingTestMixin.
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): `EfficientFormerImageProcessorTester` is not defined anywhere
        # in this file; the tester class defined just above (also named `A__`) looks
        # like the intended helper — confirm and rename it so it can be used here.
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        """Constructor kwargs for the image processor, supplied by the tester."""
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes the expected configuration attributes."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        # Intentionally empty: batching is exercised by the test_call_* methods below.
        pass

    def test_call_pil(self):
        """PIL inputs are processed to (N, C, H, W) for single and batched calls."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        """NumPy array inputs behave like PIL inputs."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        """Torch tensor inputs behave like PIL inputs."""
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 52
|
def solution(length: int = 50) -> int:
    """Project Euler 116: count ways to replace grey tiles in a row of *length*
    squares with coloured oblongs — red (length 2), green (3) or blue (4) —
    using at least one oblong and a single colour per arrangement.

    BUG FIX: the parameter was named ``_lowerCAmelCase`` while the body read
    ``length`` (NameError), and the __main__ guard called the undefined name
    ``solution``; both names are restored.

    >>> solution(5)
    12
    """
    # different_colour_ways_number[n][c] = arrangements of a row of length n
    # using at least one tile of length c + 2 (c = 0 red, 1 green, 2 blue).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


# Backward-compatible alias for the previous (mangled) name.
A_ = solution


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 52
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A__(SchedulerMixin, ConfigMixin):
    """Variance-preserving (VP) SDE scheduler (port of score_sde_pytorch).

    BUG FIX: the original ``__init__`` named every parameter ``A_`` (a SyntaxError)
    and both methods were named ``__UpperCamelCase`` so the second silently
    replaced the first. Parameter names are restored from the ``self.config``
    reads in the body (``sampling_eps``, ``beta_min``, ``beta_max``,
    ``num_train_timesteps``); the method name ``set_timesteps`` is grounded by
    the error message below.
    """

    # Solver order of this scheduler.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        # `register_to_config` records the arguments on `self.config`;
        # these fields are filled lazily by `set_timesteps`.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        """Create the decreasing continuous timestep schedule on `device`."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Run one reverse-SDE predictor step.

        Args:
            score: model score estimate.
            x: current sample.
            t: continuous timestep(s).
            generator: optional RNG for the injected noise.

        Returns:
            Tuple ``(x, x_mean)`` — the noised sample and its mean.

        Raises:
            ValueError: if ``set_timesteps`` was not called first.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 52
|
def remove_duplicates(key: str) -> str:
    """Return *key* with repeated alphabetic characters removed (spaces kept).

    >>> remove_duplicates("Hello World!!")
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        # Precedence: space is always kept; letters only on first occurrence.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build the keyword-cipher substitution map for the 26 uppercase letters.

    The deduplicated keyword fills the first positions; the rest of the
    alphabet follows, skipping letters already used by the keyword.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encode *message* (upper-cased) with *cipher_map*; unmapped chars pass through.

    >>> encipher("Hello World!!", create_cipher_map("Goodbye!!"))
    'CYJJM VMQJB!!'
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Invert *cipher_map* and decode *message* (upper-cased).

    >>> decipher("CYJJM VMQJB!!", create_cipher_map("Goodbye!!"))
    'HELLO WORLD!!'
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive entry point: prompt for message, keyword and direction."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


# Backward-compatible alias: in the mangled original all five functions were
# named `A_`, so the name ultimately resolved to the entry point.
A_ = main


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 52
| 1
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class A__ ( __snake_case ):
    """Common padding/truncation test mixin for sequence feature extractors.

    NOTE(review): this block appears machine-transformed — every method is named
    `__UpperCamelCase` (so each later definition silently replaces the earlier
    one), every local is assigned to the single name `UpperCamelCase`, and `A_`
    is read where literals or parameters presumably stood. The base name
    `__snake_case` is also undefined here. Comments below describe the apparent
    intent only; confirm against the original mixin before relying on them.
    """

    # to overwrite at feature extractactor specific tests
    _UpperCAmelCase :Any = None
    _UpperCAmelCase :Optional[Any] = None

    @property
    def __UpperCamelCase( self ):
        """Constructor kwargs for the feature extractor, built by the tester.

        NOTE(review): referenced elsewhere as `self.feat_extract_dict`, so this
        property was presumably named `feat_extract_dict` — confirm.
        """
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def __UpperCamelCase( self ):
        """Common attributes exist on a freshly constructed extractor."""
        UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(A_ , "feature_size" ) )
        self.assertTrue(hasattr(A_ , "sampling_rate" ) )
        self.assertTrue(hasattr(A_ , "padding_value" ) )

    def __UpperCamelCase( self ):
        """BatchFeature round-trips inputs and produces the expected np shape."""
        UpperCamelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Tuple = feat_extract.model_input_names[0]
        UpperCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) )
        UpperCamelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
        UpperCamelCase : int = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
        UpperCamelCase : Optional[int] = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCamelCase : int = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    @require_torch
    def __UpperCamelCase( self ):
        """Same as above with PyTorch tensor output."""
        UpperCamelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
        UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Tuple = feat_extract.model_input_names[0]
        UpperCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
        UpperCamelCase : Union[str, Any] = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCamelCase : List[str] = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    @require_tf
    def __UpperCamelCase( self ):
        """Same as above with TensorFlow tensor output."""
        UpperCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ )
        UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : str = feat_extract.model_input_names[0]
        UpperCamelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
        UpperCamelCase : Dict = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCamelCase : str = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    def __UpperCamelCase( self , A_=False ):
        """Exercise `pad` across padding strategies and `pad_to_multiple_of`.

        NOTE(review): called elsewhere as `self._check_padding(numpify=...)`,
        so this was presumably `_check_padding` — confirm.
        """
        def _inputs_have_equal_length(A_ ):
            # True when every slice has the length of the first slice.
            UpperCamelCase : List[Any] = len(input[0] )
            for input_slice in input[1:]:
                if len(A_ ) != length:
                    return False
            return True

        # NOTE(review): duplicate parameter names below are a SyntaxError kept
        # verbatim from the mangled original.
        def _inputs_are_equal(A_ , A_ ):
            if len(A_ ) != len(A_ ):
                return False
            for input_slice_a, input_slice_a in zip(A_ , A_ ):
                if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
                    return False
            return True

        UpperCamelCase : int = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
        UpperCamelCase : List[Any] = feat_extract.model_input_names[0]
        UpperCamelCase : str = BatchFeature({input_name: speech_inputs} )
        UpperCamelCase : str = self.feat_extract_tester.seq_length_diff
        UpperCamelCase : List[Any] = self.feat_extract_tester.max_seq_length + pad_diff
        UpperCamelCase : Optional[Any] = self.feat_extract_tester.min_seq_length
        UpperCamelCase : List[str] = self.feat_extract_tester.batch_size
        UpperCamelCase : Tuple = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        UpperCamelCase : Optional[int] = feat_extract.pad(A_ , padding=A_ )
        UpperCamelCase : Dict = input_a[input_name]
        UpperCamelCase : Dict = feat_extract.pad(A_ , padding="longest" )
        UpperCamelCase : Any = input_a[input_name]
        UpperCamelCase : List[Any] = feat_extract.pad(A_ , padding="max_length" , max_length=len(speech_inputs[-1] ) )
        UpperCamelCase : Optional[int] = input_a[input_name]
        UpperCamelCase : Optional[Any] = feat_extract.pad(A_ , padding="longest" , return_tensors="np" )
        UpperCamelCase : int = input_a[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="max_length" )[input_name]
        UpperCamelCase : str = feat_extract.pad(
            A_ , padding="max_length" , max_length=A_ , return_tensors="np" )
        UpperCamelCase : Optional[int] = input_a[input_name]
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_are_equal(A_ , A_ ) )
        self.assertTrue(len(input_a[0] ) == pad_min_length )
        self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
        self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
        self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        UpperCamelCase : Dict = feat_extract.pad(A_ , pad_to_multiple_of=10 )
        UpperCamelCase : Optional[Any] = input_a[input_name]
        UpperCamelCase : Optional[int] = feat_extract.pad(A_ , padding="longest" , pad_to_multiple_of=10 )
        UpperCamelCase : str = input_a[input_name]
        UpperCamelCase : List[str] = feat_extract.pad(
            A_ , padding="max_length" , pad_to_multiple_of=10 , max_length=A_ )
        UpperCamelCase : str = input_a[input_name]
        UpperCamelCase : List[Any] = feat_extract.pad(
            A_ , padding="max_length" , pad_to_multiple_of=10 , max_length=A_ , return_tensors="np" , )
        UpperCamelCase : Tuple = input_a[input_name]
        self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
        self.assertTrue(_inputs_are_equal(A_ , A_ ) )
        UpperCamelCase : Dict = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
        self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == feature_size )
        # Check padding value is correct
        UpperCamelCase : Any = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
            < 1e-3 )

    def __UpperCamelCase( self , A_=False ):
        """Exercise `pad` with `truncation=True` across strategies.

        NOTE(review): called elsewhere as `self._check_truncation(numpify=...)`,
        so this was presumably `_check_truncation` — confirm.
        """
        def _inputs_have_equal_length(A_ ):
            UpperCamelCase : List[Any] = len(input[0] )
            for input_slice in input[1:]:
                if len(A_ ) != length:
                    return False
            return True

        # NOTE(review): duplicate parameter names below are a SyntaxError kept
        # verbatim from the mangled original.
        def _inputs_are_equal(A_ , A_ ):
            if len(A_ ) != len(A_ ):
                return False
            for input_slice_a, input_slice_a in zip(A_ , A_ ):
                if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
                    return False
            return True

        UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
        UpperCamelCase : Tuple = feat_extract.model_input_names[0]
        UpperCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} )
        # truncate to smallest
        UpperCamelCase : Union[str, Any] = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=A_ )
        UpperCamelCase : Union[str, Any] = input_a[input_name]
        UpperCamelCase : Any = feat_extract.pad(A_ , padding="max_length" , max_length=len(speech_inputs[0] ) )
        UpperCamelCase : Union[str, Any] = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        # truncate to smallest with np
        UpperCamelCase : Union[str, Any] = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=A_ , )
        UpperCamelCase : str = input_a[input_name]
        UpperCamelCase : Dict = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
        UpperCamelCase : str = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        # truncate to middle
        UpperCamelCase : Dict = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors="np" , )
        UpperCamelCase : Union[str, Any] = input_a[input_name]
        UpperCamelCase : Tuple = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=A_ )
        UpperCamelCase : List[str] = input_a[input_name]
        UpperCamelCase : int = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
        UpperCamelCase : Optional[Any] = input_a[input_name]
        self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_are_equal(A_ , A_ ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , truncation=A_ )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="longest" , truncation=A_ )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="longest" , truncation=A_ )[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="max_length" , truncation=A_ )[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        UpperCamelCase : Any = 12
        UpperCamelCase : str = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
        UpperCamelCase : Tuple = input_a[input_name]
        UpperCamelCase : Optional[int] = feat_extract.pad(
            A_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
        UpperCamelCase : str = input_a[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        UpperCamelCase : List[str] = len(speech_inputs[0] )
        if expected_length % pad_to_multiple_of != 0:
            UpperCamelCase : Optional[Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0] ) == expected_length )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertFalse(_inputs_have_equal_length(A_ ) )

    def __UpperCamelCase( self ):
        """Padding check, list inputs (mangled flag — presumably numpify=False)."""
        self._check_padding(numpify=A_ )

    def __UpperCamelCase( self ):
        """Padding check, numpy inputs (mangled flag — presumably numpify=True)."""
        self._check_padding(numpify=A_ )

    def __UpperCamelCase( self ):
        """Truncation check, list inputs (mangled flag — presumably numpify=False)."""
        self._check_truncation(numpify=A_ )

    def __UpperCamelCase( self ):
        """Truncation check, numpy inputs (mangled flag — presumably numpify=True)."""
        self._check_truncation(numpify=A_ )

    @require_torch
    def __UpperCamelCase( self ):
        """np and pt padded outputs agree numerically."""
        UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : Optional[Any] = feat_extract.model_input_names[0]
        UpperCamelCase : Tuple = BatchFeature({input_name: speech_inputs} )
        UpperCamelCase : Optional[int] = feat_extract.pad(A_ , padding="longest" , return_tensors="np" )[input_name]
        UpperCamelCase : List[Any] = feat_extract.pad(A_ , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )

    @require_tf
    def __UpperCamelCase( self ):
        """np and tf padded outputs agree numerically."""
        UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCamelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : str = feat_extract.model_input_names[0]
        UpperCamelCase : Tuple = BatchFeature({input_name: speech_inputs} )
        UpperCamelCase : int = feat_extract.pad(A_ , padding="longest" , return_tensors="np" )[input_name]
        UpperCamelCase : Union[str, Any] = feat_extract.pad(A_ , padding="longest" , return_tensors="tf" )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )

    def __UpperCamelCase( self ):
        """`return_attention_mask=True` yields a mask matching input lengths."""
        UpperCamelCase : List[str] = self.feat_extract_dict
        UpperCamelCase : Optional[Any] = True
        UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**A_ )
        UpperCamelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : Union[str, Any] = [len(A_ ) for x in speech_inputs]
        UpperCamelCase : List[Any] = feat_extract.model_input_names[0]
        UpperCamelCase : Optional[Any] = BatchFeature({input_name: speech_inputs} )
        UpperCamelCase : Tuple = feat_extract.pad(A_ , padding="longest" , return_tensors="np" )
        self.assertIn("attention_mask" , A_ )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )

    def __UpperCamelCase( self ):
        """Attention mask is truncated consistently with the inputs."""
        UpperCamelCase : Optional[int] = self.feat_extract_dict
        UpperCamelCase : Optional[int] = True
        UpperCamelCase : Optional[int] = self.feature_extraction_class(**A_ )
        UpperCamelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common()
        UpperCamelCase : Tuple = [len(A_ ) for x in speech_inputs]
        UpperCamelCase : Optional[Any] = feat_extract.model_input_names[0]
        UpperCamelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
        UpperCamelCase : Dict = min(A_ )
        UpperCamelCase : str = feat_extract.pad(
            A_ , padding="max_length" , max_length=A_ , truncation=A_ , return_tensors="np" )
        self.assertIn("attention_mask" , A_ )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 52
|
from sklearn.metrics import fa_score
import datasets
__lowerCamelCase : List[Any] = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
__lowerCamelCase : List[Any] = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
__lowerCamelCase : str = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    # NOTE(review): both methods below share the mangled name __UpperCamelCase,
    # so the second definition shadows the first inside the class body, and the
    # second signature repeats the parameter name A_ -- a SyntaxError.
    # Presumably these were `_info` and `_compute` before an automated rename;
    # confirm against the original `datasets` F1 metric script.
    def __UpperCamelCase( self ):
        """Declare metric metadata; the feature schema is sequence-valued when
        the metric is loaded with the `multilabel` config, scalar otherwise."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32" ) ),
                    "references": datasets.Sequence(datasets.Value("int32" ) ),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32" ),
                    "references": datasets.Value("int32" ),
                } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )

    def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ):
        """Compute F1 via sklearn.metrics.f1_score; returns a scalar float
        unless `average=None`, in which case the per-class array is returned."""
        UpperCamelCase : List[str] = fa_score(
            A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ )
        return {"f1": float(A_ ) if score.size == 1 else score}
| 52
| 1
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def A_ ( _lowerCAmelCase ) -> None:
    """Strip fairseq bookkeeping entries from a checkpoint state dict, in place.

    Args:
        _lowerCAmelCase: the fairseq ``state_dict`` (a plain dict); mutated.

    The mangled original called ``state_dict.pop(state_dict, state_dict)`` --
    i.e. tried to pop the (unhashable) dict itself and ignored the loop
    variable. The intended call is ``pop(k, None)``, tolerant of missing keys.
    (Return annotation fixed: the function returns nothing.)
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # default of None so absent keys are silently skipped
        _lowerCAmelCase.pop(k, None)
def A_ ( _lowerCAmelCase ) -> nn.Linear:
    """Build a bias-free ``nn.Linear`` that reuses the embedding's weight tensor
    (the standard tied LM output-projection trick).

    Args:
        _lowerCAmelCase: an ``nn.Embedding`` (or any module with a 2-D
            ``weight``) whose data the linear layer should share.

    Returns:
        ``nn.Linear`` with ``bias=None`` and ``weight.data`` pointing at the
        embedding's weight data.

    The mangled original unpacked the shape into a single name and called
    ``nn.Linear(emb, emb, bias=emb)``; restored to the usual HF pattern.
    """
    vocab_size, emb_size = _lowerCAmelCase.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share (not copy) the embedding weights
    lin_layer.weight.data = _lowerCAmelCase.weight.data
    return lin_layer
def A_ ( state_dict , expert_idx=None ) -> dict:
    """Translate fairseq NLLB-MoE parameter names into the HF NllbMoe layout.

    Args:
        state_dict: mapping of fairseq parameter names to tensors.
        expert_idx: when converting a per-expert shard, the expert number
            spliced into ``ffn.experts.expert_{i}``; ``None`` keeps the
            generic ``ffn.experts.expert_`` prefix.

    Returns:
        A new dict with renamed keys and the original values.

    The mangled original had two parameters with the same name (a SyntaxError)
    and returned a never-assigned ``new_dict``; both restored. The condition
    ``if "fc2" and "experts" not in key`` collapsed to ``"experts" not in key``
    because the string literal is always truthy -- made the intended
    conjunction explicit (behaviorally identical, since ``.replace(".fc2.")``
    is a no-op on keys without fc2).
    """
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
# NOTE(review): this converter was mangled by an automated rename -- all five
# parameters share one name (a SyntaxError in Python) and every local collapsed
# onto `UpperCamelCase`, so the body cannot run as written. The apparent intent:
# load one fairseq checkpoint per expert rank, rename its keys, save a shard
# per expert, append the shared weights, then (unless there is a single shard)
# rename the shards to "-NNNNN-of-TOTAL" and write a weight-map index JSON.
# Confirm all specifics against the original conversion script.
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = WEIGHTS_NAME ) -> Optional[int]:
    UpperCamelCase : Optional[Any] = []  # presumably sharded_state_dicts: key sets per shard
    UpperCamelCase : Optional[int] = 0  # presumably total_size in bytes
    os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
    # one fairseq checkpoint file per expert rank
    for expert in range(_lowerCAmelCase ):
        UpperCamelCase : Tuple = switch_checkpoint_path + F"""-rank-{expert}.pt"""
        if os.path.isfile(_lowerCAmelCase ):
            # load, strip bookkeeping keys, convert to HF naming, save shard
            UpperCamelCase : str = torch.load(_lowerCAmelCase )["model"]
            remove_ignore_keys_(_lowerCAmelCase )
            UpperCamelCase : List[Any] = rename_fairseq_keys(_lowerCAmelCase , _lowerCAmelCase )
            UpperCamelCase : Optional[int] = os.path.join(
                _lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(_lowerCAmelCase )+1:05d}-of-???.bin""" ) )
            torch.save(_lowerCAmelCase , _lowerCAmelCase )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(_lowerCAmelCase )[0]].dtype )
    # Add the last block
    UpperCamelCase : int = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F"""-{len(_lowerCAmelCase )+1:05d}-of-???.bin""" ) )
    UpperCamelCase : Optional[int] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(_lowerCAmelCase )
    UpperCamelCase : List[Any] = rename_fairseq_keys(_lowerCAmelCase , _lowerCAmelCase )
    UpperCamelCase : List[str] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(_lowerCAmelCase ) == 1:
        UpperCamelCase : Tuple = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
        torch.save(_lowerCAmelCase , _lowerCAmelCase )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(_lowerCAmelCase , _lowerCAmelCase )
    # Otherwise, let's build the index
    UpperCamelCase : List[str] = {}  # presumably weight_map: param name -> shard file
    for idx, shard in enumerate(_lowerCAmelCase ):
        UpperCamelCase : Optional[int] = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin""" )
        UpperCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
        for key in shard:
            UpperCamelCase : Optional[Any] = shard_file
    # Add the metadata
    UpperCamelCase : Tuple = {"total_size": total_size}
    UpperCamelCase : str = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
        UpperCamelCase : str = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
        f.write(_lowerCAmelCase )
    return metadata, index
if __name__ == "__main__":
    # NOTE(review): the automated rename assigned parser/args/config/model to
    # `__lowerCamelCase`, but later lines still reference `parser`, `args`,
    # `config` and `model`, so this script cannot run as written. Apparent
    # intent: parse CLI paths, shard the 128-expert fairseq checkpoint into HF
    # weight files, write a matching NllbMoeConfig, then round-trip the model.
    __lowerCamelCase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--nllb_moe_checkpoint_path""",
        default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
        type=str,
        required=False,
        help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
    )
    parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
        type=str,
        required=False,
        help="""Path to the output pytorch model.""",
    )
    __lowerCamelCase : str = parser.parse_args()
    # shard the per-expert fairseq checkpoints into HF-style weight files + index
    __lowerCamelCase , __lowerCamelCase : int = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    __lowerCamelCase : str = NllbMoeConfig.from_pretrained(
        """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    __lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    # NOTE(review): "Done" is printed before the final save completes
    print("""Done""")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 52
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
    """Fast (dummy-weight) tests for ``KandinskyInpaintPipeline``.

    NOTE(review): an automated rename collapsed every helper/property onto the
    name ``__UpperCamelCase`` (each later definition shadows the earlier ones),
    collapsed locals onto ``UpperCamelCase``, and one signature repeats a
    parameter name (a SyntaxError). Comments describe the apparent intent of
    the original diffusers test; confirm against it before relying on them.
    """

    # pipeline under test plus the argument lists the common pipeline mixin checks
    _UpperCAmelCase :List[str] = KandinskyInpaintPipeline
    _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    _UpperCAmelCase :Dict = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    _UpperCAmelCase :Optional[int] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :int = False

    @property
    def __UpperCamelCase( self ):
        '''Text-embedder hidden size for the dummy models.'''
        return 32

    @property
    def __UpperCamelCase( self ):
        '''Time-embedding input dim (shadows the previous property).'''
        return 32

    @property
    def __UpperCamelCase( self ):
        '''Cross-attention dim, tied to the time-input dim.'''
        return self.time_input_dim

    @property
    def __UpperCamelCase( self ):
        '''Time-embedding dim (4x the input dim).'''
        return self.time_input_dim * 4

    @property
    def __UpperCamelCase( self ):
        '''Number of inference steps budget used by the mixin.'''
        return 100

    @property
    def __UpperCamelCase( self ):
        '''Tiny multilingual tokenizer from the hub.'''
        UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer

    @property
    def __UpperCamelCase( self ):
        '''Seeded tiny MultilingualCLIP text encoder in eval mode.'''
        torch.manual_seed(0 )
        UpperCamelCase : Optional[int] = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        UpperCamelCase : Optional[int] = MultilingualCLIP(A_ )
        UpperCamelCase : Union[str, Any] = text_encoder.eval()
        return text_encoder

    @property
    def __UpperCamelCase( self ):
        '''Seeded tiny UNet; 9 in-channels = 4 latents + 4 masked latents + 1 mask.'''
        torch.manual_seed(0 )
        UpperCamelCase : Optional[int] = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ )
        return model

    @property
    def __UpperCamelCase( self ):
        '''Constructor kwargs for the tiny VQ ("movq") image codec.'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCamelCase( self ):
        '''Seeded tiny VQModel built from the kwargs above.'''
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
        return model

    def __UpperCamelCase( self ):
        '''Assemble the full component dict expected by the pipeline class.'''
        UpperCamelCase : Any = self.dummy_text_encoder
        UpperCamelCase : str = self.dummy_tokenizer
        UpperCamelCase : List[Any] = self.dummy_unet
        UpperCamelCase : Optional[Any] = self.dummy_movq
        UpperCamelCase : Union[str, Any] = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
        UpperCamelCase : Optional[Any] = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __UpperCamelCase( self , A_ , A_=0 ):
        '''Deterministic call kwargs: random embeds, a 256x256 RGB init image and
        a mask with a single zeroed pixel. NOTE(review): the signature repeats
        parameter A_ -- a SyntaxError left by the automated rename.'''
        UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
        # create init_image
        UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) )
        # create mask
        UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa )
        UpperCamelCase : str = 0
        if str(A_ ).startswith("mps" ):
            # mps does not support device-local generators
            UpperCamelCase : int = torch.manual_seed(A_ )
        else:
            UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Union[str, Any] = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def __UpperCamelCase( self ):
        '''Two-step CPU run: dataclass and tuple return paths must both match a
        fixed 3x3 corner slice of the output image.'''
        UpperCamelCase : Optional[int] = "cpu"
        UpperCamelCase : Tuple = self.get_dummy_components()
        UpperCamelCase : str = self.pipeline_class(**A_ )
        UpperCamelCase : Tuple = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : List[Any] = output.images
        UpperCamelCase : List[Any] = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
        UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
        UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""" )
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase : Union[str, Any] = np.array(
            [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def __UpperCamelCase( self ):
        '''Batched vs single inference must agree within a loose tolerance.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration test for Kandinsky inpainting against a reference
    image from the hub.

    NOTE(review): locals were collapsed onto ``UpperCamelCase`` by an automated
    rename, so names like ``expected_image``, ``init_image``, ``pipe_prior``,
    ``pipeline``, ``image`` below are unresolved as written; also the cleanup
    method was presumably ``tearDown`` before renaming.
    """

    def __UpperCamelCase( self ):
        '''Free GPU memory between tests (tearDown-style cleanup).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase( self ):
        '''Prior -> inpaint pipeline round trip: put "a hat" on the cat image
        and compare against the stored fp16 reference output.'''
        UpperCamelCase : Optional[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        UpperCamelCase : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
        UpperCamelCase : str = 0
        UpperCamelCase : List[Any] = "a hat"
        UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )
        UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
        UpperCamelCase : Optional[Any] = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )
        UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior(
            A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        UpperCamelCase : Dict = pipeline(
            A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        UpperCamelCase : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(A_ , A_ )
| 52
| 1
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class A__ :
    """A simple real-valued vector.

    Entries are stored in a private list. Supports ``len()``, ``str()``,
    ``+``, ``-``, scalar multiplication and dot product via ``*``, plus
    component access/update, Euclidean length and the angle to another vector.

    NOTE(review): an automated rename broke this class -- it was presumably
    named ``Vector``, every non-dunder method collapsed onto
    ``__UpperCamelCase`` and two signatures repeated a parameter name (a
    SyntaxError). This rewrite restores distinct method names
    (``copy``/``component``/``change_component``/``euclidean_length``/``angle``)
    matching the calls made by the Matrix code elsewhere in the file.
    """

    def __init__(self, components=None):
        """Create a vector from an iterable of numbers (empty when None)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        """Number of components."""
        return len(self.__components)

    def __str__(self):
        """Render as "(c1,c2,...)"."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            summed = [self.__components[i] + other.component(i) for i in range(size)]
            return A__(summed)
        raise Exception("must have the same size")

    def __sub__(self, other):
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            diff = [self.__components[i] - other.component(i) for i in range(size)]
            return A__(diff)
        raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> "A__":
        ...

    @overload
    def __mul__(self, other: "A__") -> float:
        ...

    def __mul__(self, other):
        """Scalar multiplication (number operand) or dot product (vector)."""
        if isinstance(other, (float, int)):
            return A__([c * other for c in self.__components])
        if isinstance(other, A__) and len(self) == len(other):
            size = len(self)
            return sum(self.__components[i] * other.component(i) for i in range(size))
        raise Exception("invalid operand!")

    def copy(self):
        """Return an independent copy of this vector."""
        return A__(self.__components)

    def component(self, i):
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        raise Exception("index out of range")

    def change_component(self, pos, value):
        """Set the component at ``pos`` to ``value``."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        """Return the 2-norm; raises on an empty vector."""
        if not self.__components:
            raise Exception("Vector is empty")
        return math.sqrt(sum(c**2 for c in self.__components))

    def angle(self, other, deg=False):
        """Angle between self and other, in radians (degrees when deg=True)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        return math.acos(num / den)
def A_ ( dimension ) -> "Vector":
    """Return the zero vector of the given dimension.

    Fixes the mangled ``isinstance(x, x)`` assert and the undefined local
    ``dimension``. NOTE(review): ``Vector`` is not defined under that name in
    this chunk (the class was renamed by an automated pass) -- restore the
    alias upstream before calling this.
    """
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def A_ ( dimension , pos ) -> "Vector":
    """Return the ``dimension``-dimensional unit basis vector with a 1 at
    index ``pos``.

    Restores distinct parameter names (the mangled signature repeated one
    name -- a SyntaxError) and the lost ``ans[pos] = 1`` assignment.
    NOTE(review): ``Vector`` is unresolved in this chunk; see the zero-vector
    helper's note.
    """
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def A_ ( scalar , x , y ) -> "Vector":
    """Return ``x * scalar + y`` (the BLAS "axpy" operation).

    Parameter names and order reconstructed from the surviving
    ``return x * scalar + y`` in the mangled body; the original signature
    repeated one parameter name (a SyntaxError).
    NOTE(review): ``Vector`` is unresolved in this chunk; restore upstream.
    """
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and isinstance(scalar, (int, float))
    )
    return x * scalar + y
def A_ ( n , a , b ) -> "Vector":
    """Return an ``n``-dimensional vector of random integers in ``[a, b]``.

    NOTE(review): the mangled source seeded ``random`` with one of its
    (collapsed) parameters; seeding with ``None`` (system entropy) is assumed
    here -- confirm against the original. ``Vector`` is unresolved in this
    chunk; restore upstream.
    """
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class A__ :
    """A height x width real matrix backed by a list of row lists.

    Supports ``str()``, ``+``, ``-``, scalar and matrix-vector products via
    ``*``, component access/update, minors, cofactors and (recursive Laplace)
    determinants.

    NOTE(review): restored from an automated rename that collapsed every
    non-dunder method onto ``__UpperCamelCase`` and repeated parameter names
    (a SyntaxError). Method names (``height``/``width``/``component``/
    ``change_component``/``minor``/``cofactor``/``determinant``) match the
    calls left in the mangled body. ``Vector`` and ``zero_vector`` referenced
    by ``__mul__`` are not defined under those names in this chunk -- the
    vector branch will NameError until they are restored upstream.
    """

    def __init__(self, matrix, w, h):
        """Store the row-list ``matrix`` and its dimensions w (width) x h (height)."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        """Render rows as "|a,b,...|", one per line (with a trailing newline)."""
        rows = []
        for i in range(self.__height):
            cells = ",".join(str(self.__matrix[i][j]) for j in range(self.__width))
            rows.append("|" + cells + "|\n")
        return "".join(rows)

    def __add__(self, other):
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            summed = [
                [self.__matrix[i][j] + other.component(i, j) for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return A__(summed, self.__width, self.__height)
        raise Exception("matrix must have the same dimension!")

    def __sub__(self, other):
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            diff = [
                [self.__matrix[i][j] - other.component(i, j) for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return A__(diff, self.__width, self.__height)
        raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> "A__":
        ...

    @overload
    def __mul__(self, other: "Vector") -> "Vector":
        ...

    def __mul__(self, other):
        """Scalar product (number operand) or matrix-vector product (Vector).

        The scalar branch is checked first so scalar multiplication works even
        though ``Vector`` is unresolved in this chunk (see class NOTE); for a
        genuine Vector operand both orders behave identically.
        """
        if isinstance(other, (int, float)):  # matrix-scalar
            scaled = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return A__(scaled, self.__width, self.__height)
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            raise Exception(
                "vector must have the same size as the "
                "number of columns of the matrix!"
            )
        return None

    def height(self):
        """Number of rows."""
        return self.__height

    def width(self):
        """Number of columns."""
        return self.__width

    def component(self, x, y):
        """Return the entry at row ``x``, column ``y``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        # fixed copy-pasted message that previously said "change_component"
        raise Exception("component: indices out of bounds")

    def change_component(self, x, y, value):
        """Set the entry at row ``x``, column ``y``."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x, y):
        """Determinant of the submatrix with row ``x`` and column ``y`` removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor_rows = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor_rows)):
            minor_rows[i] = minor_rows[i][:y] + minor_rows[i][y + 1 :]
        return A__(minor_rows, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        """Signed minor: (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        raise Exception("Indices out of bounds")

    def determinant(self):
        """Determinant by Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        if self.__height == 1:
            return self.__matrix[0][0]
        if self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        cofactor_prods = [
            self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
        ]
        return sum(cofactor_prods)
def A_ ( n ) -> "Matrix":
    """Return the ``n`` x ``n`` zero matrix.

    The mangled source passed ``n`` for the matrix argument as well -- fixed
    to pass the constructed row list. NOTE(review): ``Matrix`` is not defined
    under that name in this chunk (the class was renamed by an automated
    pass); restore the alias upstream before calling this.
    """
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def A_ ( width , height , a , b ) -> "Matrix":
    """Return a ``height`` x ``width`` matrix of random integers in ``[a, b]``.

    Parameter names reconstructed (the mangled signature repeated one name --
    a SyntaxError). NOTE(review): the mangled source seeded ``random`` with a
    collapsed parameter; seeding with ``None`` is assumed -- confirm.
    ``Matrix`` is unresolved in this chunk; restore upstream.
    """
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 52
|
class A__ :
    """Disjoint-set (union-find) over sets with initial sizes, tracking the
    size of the largest set across merges.

    NOTE(review): restored from an automated rename that gave both methods the
    same name and repeated a parameter name (a SyntaxError); the method names
    ``merge`` and ``get_parent`` are recovered from the ``self.get_parent``
    calls left in the mangled body.
    """

    def __init__(self, set_counts):
        """``set_counts``: initial size of each singleton set, indexed by id."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Union the sets containing ``src`` and ``dst``.

        Returns False when they already share a root. Otherwise attaches the
        lower-rank root under the higher-rank one (union by rank), moves the
        size count onto the surviving root, updates ``max_set`` and returns
        True.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Return the root of ``disj_set``, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 52
| 1
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A__ ( unittest.TestCase ):
    """Fast CPU test of ``KarrasVePipeline`` with a tiny seeded UNet.

    NOTE(review): locals were collapsed onto ``UpperCamelCase`` by an
    automated rename, so names like ``model``, ``pipe``, ``image`` below are
    unresolved as written; comments describe the apparent intent.
    """

    @property
    def __UpperCamelCase( self ):
        '''Tiny deterministic UNet2DModel used as the unconditional backbone.'''
        torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    def __UpperCamelCase( self ):
        '''Two-step sampling: dataclass and tuple return paths must both match
        a fixed 3x3 corner slice.'''
        UpperCamelCase : Union[str, Any] = self.dummy_uncond_unet
        UpperCamelCase : str = KarrasVeScheduler()
        UpperCamelCase : List[str] = KarrasVePipeline(unet=A_ , scheduler=A_ )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Tuple = torch.manual_seed(0 )
        UpperCamelCase : Union[str, Any] = pipe(num_inference_steps=2 , generator=A_ , output_type="numpy" ).images
        UpperCamelCase : List[Any] = torch.manual_seed(0 )
        UpperCamelCase : Any = pipe(num_inference_steps=2 , generator=A_ , output_type="numpy" , return_dict=A_ )[0]
        UpperCamelCase : str = image[0, -3:, -3:, -1]
        UpperCamelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch

class A__ ( unittest.TestCase ):
    """Slow test: a full 20-step 256x256 KarrasVe sample from pretrained
    NCSN++ CelebA-HQ weights must match a fixed corner slice.

    NOTE(review): same automated-rename damage as the fast test above --
    ``model_id``, ``model``, ``pipe``, ``image`` are unresolved as written.
    """

    def __UpperCamelCase( self ):
        '''Generate one image from google/ncsnpp-celebahq-256 and check a slice.'''
        UpperCamelCase : List[Any] = "google/ncsnpp-celebahq-256"
        UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained(A_ )
        UpperCamelCase : Any = KarrasVeScheduler()
        UpperCamelCase : List[Any] = KarrasVePipeline(unet=A_ , scheduler=A_ )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : List[str] = torch.manual_seed(0 )
        UpperCamelCase : List[str] = pipe(num_inference_steps=20 , generator=A_ , output_type="numpy" ).images
        UpperCamelCase : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        UpperCamelCase : int = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 52
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map each submodule to the public names it exposes.  ``_LazyModule`` (bound at
# the bottom of the file) uses this mapping to import heavy backends lazily.
# The original assigned the dict (and every backend list) to a throwaway name
# and then referenced the undefined ``_import_structure`` — restored here.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

# Each optional backend only contributes its symbols when installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so backends load on first attribute
    # access; the original bound the proxy to a dead local instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52
| 1
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A__(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF inpainting super-resolution pipeline.

    Fixes restored from the visible imports/usage: the original declared the
    same (undefined) base class twice, reused one obfuscated name for all four
    mixin-required class attributes, duplicated a parameter name in
    ``get_dummy_inputs`` (a SyntaxError), and gave every test method the same
    name so only the last survived.
    """

    # Attribute names required by PipelineTesterMixin.
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support per-device generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Looser tolerance on half precision.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 52
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Checkpoint name -> remote config URL.  The original bound the logger and this
# map to the same name, silently clobbering the logger.
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class A__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXT-V2 model.

    Restored from the visible imports/body: the original listed one undefined
    base twice (``BackboneConfigMixin`` and ``PretrainedConfig`` are imported
    above but unused), repeated one parameter name eighteen-odd times (a
    SyntaxError), and dropped ``self.`` from every attribute assignment.
    """

    # ``model_type`` is the attribute name PretrainedConfig keys on.
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults match the values hard-coded in the original body.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 52
| 1
|
def solution(_lowerCAmelCase: int = 1000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits.

    Project Euler problem 25.  The original body compared against an undefined
    ``n``, counted the digits of the *parameter* instead of the Fibonacci
    number, and the ``__main__`` guard called the undefined name ``solution``
    — the function is renamed to match its only caller.
    """
    if _lowerCAmelCase < 1:
        raise ValueError("parameter must be a positive integer")
    if _lowerCAmelCase == 1:
        return 1  # F(1) = 1 already has one digit
    fa, fb = 1, 1  # F(1), F(2)
    index = 2
    while len(str(fb)) < _lowerCAmelCase:
        fa, fb = fb, fa + fb
        index += 1
    return index


if __name__ == "__main__":
    print(solution(int(input().strip())))
| 52
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """In CONNECTION_TIMES_OUT mode, a raw request hangs (simulated) and a
    bounded request times out.  Restored the exception class the original
    referenced through an undefined name (imported above)."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """In CONNECTION_FAILS mode any request raises ConnectionError.

    Renamed from the obfuscated ``A_`` (which collided with the sibling tests
    and was never collected by pytest)."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    """With HF_DATASETS_OFFLINE=1, ``http_head`` must refuse to connect.

    The original raised-exception class was an undefined name; a plain
    ``ConnectionError`` is the documented behavior of datasets' offline mode.
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 52
| 1
|
import functools


def A_(worda: str, wordb: str) -> int:
    """Return the Levenshtein (edit) distance between ``worda`` and ``wordb``.

    Top-down dynamic programming memoized with ``functools.cache``.  The
    original declared the same parameter name twice in both the outer and the
    inner ``def`` — a SyntaxError — so distinct names are restored.

    >>> A_("horse", "ros")
    3
    >>> A_("intention", "execution")
    5
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 52
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: ``MMBTConfig`` is always available; the modeling
# classes only when torch is installed.  The original bound the dict to a dead
# name and then passed the undefined ``_import_structure`` to ``_LazyModule``.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace the module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52
| 1
|
import os
import sys
import unittest


# Make the repo-level ``utils`` directory importable so ``check_dummies`` can
# be loaded.  The original assigned the path to a throwaway name and then used
# the undefined ``git_repo_path``.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
# NOTE(review): the original bound this path to a dead local; per the comment
# it must be written into the imported module — confirm the attribute name.
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


# Templates mirroring the dummy objects emitted by ``create_dummy_object``.
# The original assigned all three to the same name, keeping only the last.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class A__(unittest.TestCase):
    """Unit tests for the ``check_dummies`` repo utility.

    The original assigned every intermediate result to one reused local and
    then asserted on an undefined name, and gave all four test methods the
    same name so only the last was ever defined; both are restored here.
    """

    def test_find_backend(self):
        # A plain import-structure line guards no backend.
        no_backend = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")")
        self.assertIsNone(no_backend)

        simple_backend = find_backend(" if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 52
|
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
# Metric-card metadata.  The decorator and ``_info`` below reference
# ``_CITATION`` / ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``, but the original
# bound all three strings to one reused throwaway name — restored here.
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=[\"About 95 species are currently accepted .\"]
    >>> predictions=[\"About 95 you now get in .\"]
    >>> references=[[\"About 95 species are currently known .\"]]
    >>> wiki_split = datasets.load_metric(\"wiki_split\")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(text: str) -> str:
    """Lower-case ``text`` and strip punctuation, articles and extra whitespace.

    Renamed from the obfuscated ``A_`` to match its caller (``compute_exact``).
    The original passed the *text* to ``re.sub`` where the compiled pattern
    belonged and used an undefined ``exclude`` — both restored.
    """

    def remove_articles(s: str) -> str:
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", s)

    def white_space_fix(s: str) -> str:
        return " ".join(s.split())

    def remove_punc(s: str) -> str:
        exclude = set(string.punctuation)
        return "".join(ch for ch in s if ch not in exclude)

    def lower(s: str) -> str:
        return s.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold: str, a_pred: str) -> int:
    """Return 1 if the two answers are identical after normalization, else 0.

    Renamed to match its caller in ``compute_em``; the original declared the
    same parameter name twice (a SyntaxError).
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage of predictions exactly matching at least one reference.

    Renamed to match its caller in the metric's ``_compute``; the original
    declared duplicate parameter names (a SyntaxError).
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Keep/delete/add F-scores for one n-gram order of the SARI metric.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (simplified) sentence.
        rgramslist: one n-gram list per reference.
        numref: number of references (source/candidate counts are scaled by it).

    Returns:
        ``(keepscore, deletescore_precision, addscore)``.

    Renamed from the obfuscated ``A_`` to match its callers in ``SARIsent``;
    the original collapsed every distinct counter into one reused local and
    duplicated the parameter names (a SyntaxError).
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    # Scale source/candidate counts by the number of references so they are
    # comparable with the pooled reference counter.
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter

    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    deletescore_precision = 1
    if len(delgramcounter_rep) > 0:
        deletescore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, deletescore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """SARI score for one (source, candidate, references) triple.

    Extracts 1- to 4-grams of the source ``ssent``, the candidate ``csent``
    and every reference, evaluates ``SARIngram`` per order, and averages the
    keep/delete/add components.  Renamed from ``A_`` to match its caller in
    ``compute_sari``; the original collapsed all the per-order n-gram lists
    into one reused local name.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    # Per-reference n-gram lists, grouped by order.
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Normalize and tokenize a sentence for SARI.

    Renamed from ``A_`` to match its caller in ``compute_sari``; the original
    duplicated all the parameter names (a SyntaxError).
    """
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        # NOTE(review): the boolean flags were obfuscated away; ``return_str=True,
        # escape=False`` matches the EASSE/upstream usage — confirm.
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100) averaged over all prediction triples.

    Renamed from ``A_`` to match its caller in the metric's ``_compute``; the
    original duplicated the parameter names and referenced undefined locals.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu.

    References are transposed to sacrebleu's ``[n_refs][n_preds]`` layout; all
    predictions must carry the same number of references.  Renamed from ``A_``
    to match its caller in ``_compute``.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A__(datasets.Metric):
    """WikiSplit metric: combines SARI, sacrebleu and exact match.

    Methods renamed to the ``_info``/``_compute`` names ``datasets.Metric``
    dispatches on (the originals shared one obfuscated name, so only one
    survived); ``_compute``'s triplicated parameter name was a SyntaxError.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 52
| 1
|
import pytest
import datasets
# Import fixture modules as plugins
# pytest only honors the fixture-module list under the name ``pytest_plugins``;
# the original bound it to a throwaway name, so the plugins were never loaded.
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    """Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")."""
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """Register the custom ``torchaudio_latest`` marker."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Point every datasets cache directory at a per-session temp dir."""
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence progress bars for the whole test session."""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    # NOTE(review): the original passed its own (undefined) parameter as the
    # value; ``False`` is the only value consistent with the comment — confirm.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 52
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> remote config URL.  The original bound the logger and
# this map to the same name, clobbering the logger.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class A__(PretrainedConfig):
    """Configuration for a RoBERTa model.

    Restored from the body and the imports: the base ``__snake_case`` was
    undefined (``PretrainedConfig`` is imported above but unused), every
    parameter shared one name (a SyntaxError), and ``self.`` was dropped from
    the attribute assignments so the config stored nothing.
    """

    # Attribute name PretrainedConfig keys on.
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__(OnnxConfig):
    """ONNX export configuration for RoBERTa (base fixed from the undefined
    placeholder `__snake_case` to the imported `OnnxConfig`; the property is
    restored to the `inputs` name the OnnxConfig contract expects)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic input axes for ONNX export."""
        # Multiple-choice tasks carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 52
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Heuristic selector: 1 for manhattan, 0 for euclidean (see the comment above).
__lowerCamelCase : str = 0
# 7x7 search grid.
# NOTE(review): each constant below rebinds the same placeholder name; the
# classes that follow reference them as HEURISTIC / grid / delta / TPosition,
# so the original file clearly used those distinct names.
__lowerCamelCase : Tuple = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
# A board position expressed as (row, col).
__lowerCamelCase : Optional[Any] = tuple[int, int]
class Node:
    """A grid cell with A* bookkeeping (g/h/f costs and a parent link).

    Renamed from the placeholder `A__` to `Node`, the name the AStar class
    below instantiates. Fixes: `__init__` repeated the parameter name `A_`
    six times (SyntaxError), the heuristic method was defined under a mangled
    name while being called as `calculate_heuristic`, and the manhattan
    branch referenced the undefined name `A_` instead of dx/dy.
    """

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # stored as (row, col)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance to the goal: manhattan when HEURISTIC == 1, else euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        # Orders nodes by total cost so open lists can simply be sorted.
        return self.f_cost < other.f_cost
class AStar:
    """One-directional A* over the module-level `grid` using `delta` moves.

    Renamed from the placeholder `A__` to `AStar`, the name the rest of the
    file uses. Fixes: method names restored to `search` / `get_successors` /
    `retrace_path` (they are called by those names), undefined `A_`/`Node`
    argument placeholders resolved, and the open-list update now re-appends
    the *cheaper* node instead of the child in both branches.
    """

    def __init__(self, start, goal) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self):
        """Run A*; return the path to the target, or [start position] if unreachable."""
        while self.open_nodes:
            # Open nodes are sorted using Node.__lt__ (lowest f-cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        """Return all in-bounds, unobstructed neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back from `node`; return the path start -> node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar searches expanded in lockstep, each
    retargeted at the other's current frontier node every iteration.

    Renamed from the placeholder `A__` to `BidirectionalAStar` (the name the
    __main__ block uses). Fixes: method names restored (`search`,
    `retrace_bidirectional_path`), `A_` argument placeholders resolved, and
    the open-list update re-appends the cheaper node, mirroring AStar.
    """

    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Run both searches until the frontiers meet; return the joined path."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Retarget each search at the other's current node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (meeting cell deduped)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the shared meeting cell
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # Fix: the original timed the bidirectional search without ever running it.
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 52
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__(DiffusionPipeline):
    """Unconditional audio generation: iteratively denoise random noise with a
    1-D UNet and a scheduler, returning the result as an AudioPipelineOutput.

    Fixes: base class restored from the undefined placeholder `__snake_case`
    to the imported `DiffusionPipeline`; `__call__` repeated the parameter
    name `A_` five times (SyntaxError); every intermediate value was clobbered
    into a single throwaway local, losing `audio_length_in_s`, `sample_size`,
    `original_sample_size`, `dtype`, `shape` and `audio`.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        num_inference_steps=100,
        generator=None,
        audio_length_in_s=None,
        return_dict=True,
    ):
        """Generate `batch_size` clips of roughly `audio_length_in_s` seconds.

        Returns an AudioPipelineOutput, or a plain `(audio,)` tuple when
        `return_dict` is False.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the sample length once per up block, so the length
        # must be a (not degenerately small) multiple of 2 ** len(up_blocks).
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
| 52
| 1
|
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# URL template for fetching a community pipeline file from the diffusers GitHub repo.
__lowerCamelCase : Dict = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
# NOTE(review): rebinds the same placeholder name as the URL above — the
# functions below reference these as COMMUNITY_PIPELINES_URL and logger,
# so the original file used those two distinct names.
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions() -> list:
    """Return all released `diffusers` versions from PyPI, sorted ascending.

    Renamed from the placeholder `A_` to `get_diffusers_versions`, the name it
    is called by later in this file; `sorted(_lowerCAmelCase, ...)` referenced
    an undefined name instead of the fetched release keys.
    """
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    # Sort semantically (1.10 > 1.9), not lexicographically.
    return sorted(releases, key=lambda release: version.Version(release))
def init_hf_modules() -> None:
    """Create HF_MODULES_CACHE, add it to sys.path and make it a package.

    Renamed from `A_` to `init_hf_modules` (called by that name below);
    every `_lowerCAmelCase` placeholder referenced an undefined name and is
    restored to HF_MODULES_CACHE.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name) -> None:
    """Create (recursively) the package `name` inside the dynamic-module cache.

    Renamed from `A_` to `create_dynamic_module`, the name used by its
    recursive call and by get_cached_module_file below.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file) -> list:
    """Scan a Python source file and return the module names it imports relatively.

    Renamed from `A_` to `get_relative_imports` (called by that name below);
    regex patterns are now raw strings so `\\s` is not an invalid escape.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file) -> list:
    """Return the transitive closure of files relatively imported by `module_file`.

    Renamed from `A_` to `get_relative_import_files`; the obfuscation had
    clobbered `no_change`, `files_to_check`, `all_relative_imports`,
    `new_imports` and `module_path` into undefined placeholders.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        # Relative imports are resolved against the directory of the root file.
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename) -> list:
    """Verify every top-level import of `filename` is installed; return its
    relative imports.

    Raises:
        ImportError: listing all packages that could not be imported.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import `module_path` (path separators become dots) and return
    `class_name` from it; if `class_name` is None, return the unique
    DiffusionPipeline subclass defined there.

    Fix: the original repeated the parameter name `_lowerCAmelCase`
    (a SyntaxError) — restored to (class_name, module_path), matching the
    call site at the bottom of this file.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the single user-defined DiffusionPipeline subclass in
    `loaded_module` (excluding the base class and anything shipped inside
    the `diffusers` package itself).

    Raises:
        ValueError: if more than one such subclass is defined in the module.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Locate/download `module_file` for a local path, a GitHub community
    pipeline name, or a Hub repo id, copy it (plus its relative imports) into
    the dynamic-modules cache, and return its path relative to HF_MODULES_CACHE.

    Fix: the original repeated `_lowerCAmelCase` for all nine parameters
    (a SyntaxError) and clobbered every intermediate into one placeholder;
    names are restored from the surviving keyword arguments and call sites.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        # A plain file on disk: use it directly.
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        # A bare name: a community pipeline hosted on the diffusers GitHub repo.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Cache `module_file` from a repo/name via get_cached_module_file and
    return `class_name` (or the auto-detected pipeline class) from it.

    Fix: the original repeated `_lowerCAmelCase` for every parameter
    (a SyntaxError); names restored to match get_cached_module_file.
    """
    module_file_path = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, module_file_path.replace(".py", ""))
| 52
|
import functools
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int:
UpperCamelCase : Optional[int] = len(_lowerCAmelCase )
UpperCamelCase : List[str] = len(_lowerCAmelCase )
@functools.cache
def min_distance(_lowerCAmelCase , _lowerCAmelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCamelCase : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , _lowerCAmelCase ) , 1 + min_distance(_lowerCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
    # Run the embedded doctests when this module is executed directly.
    import doctest

    doctest.testmod()
| 52
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square.

    Fix: the parameter was named `_lowerCAmelCase` while the body read the
    undefined name `number`; renamed to is_sq, the name `solution` calls.
    """
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add the three fractions x + y + z and return the reduced (top, bottom).

    Fix: the original signature repeated `_lowerCAmelCase` six times
    (a SyntaxError); renamed to add_three, the name `solution` calls.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum all unique reduced fractions x^n + y^n + z^n (n in {1, 2, -1, -2})
    with 0 < x < y < z < 1 and denominators bounded by `order`; return the sum
    of the resulting total's numerator and denominator.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2 (the original comment wrongly said n=2 here)
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 52
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-wide RNG used by floats_list() when the caller does not supply one
# (referenced below as `global_rng` — the placeholder name clobbered it).
__lowerCamelCase : str = random.Random()
if is_torch_available():
    import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-able nested list of the given (rows, cols) shape.

    Renamed from `A_` to `floats_list`, the name the test classes below call;
    the original signature repeated `_lowerCAmelCase` four times (SyntaxError).
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    """Builds feature-extractor kwargs and dummy speech inputs for the AST tests.

    Renamed from `A__` to `ASTFeatureExtractionTester`, the name instantiated
    in the test class below. Fixes: `__init__` repeated the parameter name
    `A_` nine times (a SyntaxError) and stored every value in a throwaway
    local instead of an instance attribute; method names restored to the ones
    the test class calls.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step so generated inputs strictly increase in length across the batch.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Return kwargs for constructing an ASTFeatureExtractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of float speech inputs (optionally equal-length / numpy)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Tests for ASTFeatureExtractor.

    Fixes: the base was the undefined placeholder `__snake_case` (restored to
    the imported SequenceFeatureExtractionTestMixin); every test method shared
    the name `__UpperCamelCase`, so only the last one survived class creation —
    methods are restored to distinct names; setUp assigned the tester to a
    local instead of `self.feat_extract_tester`; `np.floataa`/`torch.floataa`
    are undefined attributes (restored to float64/float32 per the test's
    double-precision-padding intent).
    """

    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Feature extraction must give the same result for list and numpy inputs.
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            # Double-precision input must still be padded to single precision.
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
             -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
             -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
             -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowerCamelCase : Tuple = logging.get_logger(__name__)
class A__(__snake_case):
    """Image processor for MobileViT-style models.

    Supports shortest-edge resizing, center-cropping, rescaling, and an
    RGB<->BGR channel flip (the pretrained checkpoints expect BGR input — see
    the comment in ``preprocess``), plus post-processing of segmentation logits.

    NOTE(review): parameter/method/attribute names were restored from the
    internal call sites (``self.resize`` etc.); the original block used
    duplicated placeholder names that do not parse. Base class alias
    ``__snake_case`` kept as-is — confirm it maps to the image-processor base.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
        **kwargs,
    ):
        """Store preprocessing defaults; `size`/`crop_size` fall back to 224 / 256x256."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        # compute the (h, w) that preserves aspect ratio for the requested shortest edge
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        """Swap the RGB<->BGR channel order of `image`."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the configured transform pipeline over one image or a list of images.

        Any argument left as None falls back to the value stored at __init__ time.
        Returns a BatchFeature with key "pixel_values".
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into one argmax segmentation map per image.

        If `target_sizes` is given, each logits map is bilinearly resized to the
        matching target size first; otherwise the raw logits resolution is used.
        """
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
# ---- (corrupted non-code separator neutralized as comment) ----
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    """A small from-scratch CNN: one convolution layer, one pooling layer and a
    two-layer fully-connected network, trained by hand-rolled backprop on
    numpy matrices.

    NOTE(review): the class name, method names and parameter names were
    restored from the internal call sites (the classmethod constructs ``CNN``,
    ``train`` calls ``self.convolute``/``self.pooling``/``self._expand`` etc.);
    the original block's duplicated placeholder names do not parse. The call
    ``self.Expand_Mat`` referenced a method that does not exist — it is fixed
    to ``self._expand_mat``, whose body matches the intended flattening.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, kernel_count, conv_step]
        :param size_p1: pooling window size
        :param bp_num1: input units of the BP network (flattened pooled size)
        :param bp_num2: hidden units
        :param bp_num3: output units
        :param rate_w: weight learning rate
        :param rate_t: threshold learning rate
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # random init in (-0.5, 0.5) for weights, (-1, 1) for thresholds
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Pickle all hyper-parameters and learned weights to `save_path`."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(F"""Model saved: {save_path}""")

    @classmethod
    def ReadModel(cls, model_path):
        """Rebuild a CNN instance from a file written by `save_model`."""
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Elementwise logistic sigmoid."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a scalar prediction to 3 decimal places."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Convolve `data` with every kernel in `w_convs`.

        Returns (flattened receptive-field matrix, list of feature maps).
        """
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to One dimenssion
        focus1_list = []
        for each_focus in data_focus:
            # fixed: was self.Expand_Mat, which is not defined on this class
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Downsample every feature map by average (default) or max pooling."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into one 1-D numpy array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        """Flatten a single matrix into shape (1, rows*cols)."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Upsample pooled gradients back to feature-map size and apply the
        sigmoid derivative of each feature map."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    # broadcast the pooled gradient over its source window
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        """Train until `n_repeat` epochs or MSE drops below `error_accuracy`.

        Returns the final MSE; optionally plots the error curve.
        """
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(F"""-------------Learning Time {rp}--------------""")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}"""))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass on each test image; return rounded outputs."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Expose the conv + pooling stage only (for inspection/visualisation)."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
    # Intentionally a no-op: the CNN class above is import-only; no demo is run
    # on direct execution.
    pass
# ---- (corrupted non-code separator neutralized as comment) ----
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Backend-independent modules; optional-backend entries are appended below only
# when the corresponding library is available. The original block assigned the
# optional lists to throwaway names, so they were never registered, and the
# lazy module was never installed into sys.modules — both fixed here.
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime a lazy module is
    # installed instead (see the `else` branch).
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---- (corrupted non-code separator neutralized as comment) ----
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# NOTE(review): both names below are mangled — the logger binding is
# immediately rebound by the dict on the next statement, so neither survives
# under a usable name. Upstream these are `logger` and
# `BART_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm before relying on either.
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config.json URL.
__lowerCamelCase : Any = {
    """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class A__(__snake_case):
    """BART model configuration.

    NOTE(review): parameter names and class attributes restored from the
    defaults' order; the original block used one duplicated parameter name
    throughout, which does not parse. Class/base aliases kept as-is.
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        """Store BART hyper-parameters and forward the token/ID settings to the base config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                "The config can simply be saved and uploaded again to be fixed.")
class A__(__snake_case):
    """ONNX export configuration for BART (seq2seq, causal-lm and classification tasks).

    NOTE(review): the original block gave every method the same mangled name,
    so only the last definition would survive; method and parameter names are
    restored from the internal call sites. Base alias kept as-is.
    """

    @property
    def inputs(self):
        """Axis layout of the model inputs for the active export task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                # with a cache only one new decoder token is fed per step
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs

    @property
    def outputs(self):
        """Axis layout of the model outputs, including `present.*` cache tensors."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[F"""present.{i}.key"""] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[F"""present.{i}.value"""] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        """Build encoder + decoder dummy inputs (and zero past_key_values when cached)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        """Build decoder-only dummy inputs with an extended attention mask for the cache."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        """Tokenize a placeholder batch with effective (dynamic-axis-safe) dimensions."""
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        """Dispatch to the task-specific dummy-input builder."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten past-key-value tuples; seq2seq tasks use the seq2seq-aware base behavior."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            # bypass the seq2seq override (alias name is the file's mangled
            # import of the seq2seq-with-past config base)
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
# ---- (corrupted non-code separator neutralized as comment) ----
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq module path -> HF WavLM module path; "*" is replaced with the layer
# index at load time. The functions below reference these names directly.
MAPPING = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
    """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
    """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """ctc_proj""",
    """mask_emb""": """masked_spec_embed""",
}
# Weights that only exist in pretraining heads / the quantizer.
TOP_LEVEL_KEYS = [
    """ctc_proj""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the submodule of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which parameter of the resolved module receives the
    tensor ("weight", "weight_g", "weight_v", "bias", or None for the module
    itself). Shape equality is checked first. The function name and branch
    targets are restored — this def is called as `set_recursively` below, and
    the original branches assigned to a throwaway local instead of the model.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of `fairseq_model`'s state dict into `hf_model`.

    Conv feature-extractor weights go through `load_conv_layer`; everything
    else is matched against MAPPING, with "*" in the mapped path replaced by
    the layer index parsed out of the fairseq name. Unmatched weights are
    collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the token before the matched key is the layer index
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq `conv_layers.*` tensor into the HF feature extractor.

    type_id 0 targets the conv weight/bias; type_id 2 targets the layer norm
    (layer 0 only when group norm is used). Anything else is recorded as
    unused. Assignment targets restored — the original branches wrote to a
    throwaway local instead of the extractor.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def A_ ( checkpoint_path , pytorch_dump_folder_path , config_path=None ) -> None:
    """Convert a fairseq WavLM checkpoint to a HF ``WavLMModel`` and save it.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional path to an existing HF ``config.json``.

    NOTE(review): the original block had duplicate parameter names (a
    SyntaxError) and dropped every intermediate into the same throwaway local;
    the data flow below is restored from the standard conversion-script shape.
    """
    # load the pre-trained checkpoint and rebuild the original fairseq model
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["cfg"] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["model"] )
    model.eval()
    # build the target HF model, from a provided config or the defaults
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the fairseq -> HF WavLM conversion.
    # NOTE(review): the parser/args are bound to the mangled name
    # `__lowerCamelCase` but the following lines read `parser`/`args`, and the
    # final call targets `convert_wavlm_checkpoint` while the function above is
    # named `A_` — restore consistent names before running this script.
    __lowerCamelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    __lowerCamelCase : int = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 52
|
from math import sqrt
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the given non-negative integer is prime (trial division)."""
    number = _lowerCAmelCase
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def A_ ( _lowerCAmelCase ) -> list:
    """Sieve of Eratosthenes: return all primes from 2 up to and including n (n > 2)."""
    n = _lowerCAmelCase
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def A_ ( _lowerCAmelCase ) -> list:
    """Return all primes in [2, n] using a per-number trial-division test."""
    n = _lowerCAmelCase
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"

    def _is_prime(num):
        # local trial-division primality test (the sibling helper name was lost
        # in mangling, so the logic is inlined here)
        if num <= 1:
            return False
        return all(num % d for d in range(2 , int(round(sqrt(num ) ) ) + 1 ))

    # iterates over all numbers between 2 up to N+1,
    # appending every prime to 'ans'
    ans = [number for number in range(2 , n + 1 ) if _is_prime(number )]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def A_ ( _lowerCAmelCase ) -> list:
    """Prime factorisation of n >= 0; 0 and 1 factorise to themselves ([0]/[1])."""
    number = _lowerCAmelCase
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returned by the function.
    if number == 0 or number == 1:
        ans.append(number )
    else:
        # trial division: integer division (//) keeps the factors exact,
        # unlike the original float `/=` accumulation
        factor = 2  # potential prime number factor.
        quotient = number
        while quotient != 1:
            if quotient % factor == 0:
                ans.append(factor )
                quotient //= factor
            else:
                factor += 1
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def A_ ( _lowerCAmelCase ) -> int:
    """Largest prime factor of n >= 0 (0 -> 0 and 1 -> 1, mirroring the factoriser)."""
    number = _lowerCAmelCase
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    def _prime_factors(value):
        # inline prime factorisation (sibling helper name was lost in mangling)
        if value in (0, 1):
            return [value]
        out = []
        factor = 2
        while value != 1:
            if value % factor == 0:
                out.append(factor )
                value //= factor
            else:
                factor += 1
        return out

    ans = max(_prime_factors(number ) )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def A_ ( _lowerCAmelCase ) -> int:
    """Smallest prime factor of n >= 0 (0 -> 0 and 1 -> 1, mirroring the factoriser)."""
    number = _lowerCAmelCase
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    def _prime_factors(value):
        # inline prime factorisation (sibling helper name was lost in mangling)
        if value in (0, 1):
            return [value]
        out = []
        factor = 2
        while value != 1:
            if value % factor == 0:
                out.append(factor )
                value //= factor
            else:
                factor += 1
        return out

    ans = min(_prime_factors(number ) )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the given integer is even."""
    number = _lowerCAmelCase
    # the original asserted isinstance(number % 2 == 0, number) — nonsensical;
    # a plain int check is the intended precondition
    assert isinstance(number , int ), "'number' must been an int"
    return number % 2 == 0
def A_ ( _lowerCAmelCase ) -> bool:
    """Return True iff the given integer is odd."""
    number = _lowerCAmelCase
    # the original asserted isinstance(number % 2 != 0, number) — nonsensical;
    # a plain int check is the intended precondition
    assert isinstance(number , int ), "'number' must been an int"
    return number % 2 != 0
def A_ ( _lowerCAmelCase ) -> list:
    """Goldbach decomposition: first pair of DISTINCT primes p < q with p+q == n.

    Scans prime pairs in ascending order and asserts a pair exists; note that,
    like the original, equal pairs (e.g. 3+3 for 6) are never considered.
    """
    number = _lowerCAmelCase
    assert (
        isinstance(number , int ) and (number > 2) and number % 2 == 0
    ), "'number' must been an int, even and > 2"

    def _is_prime(num):
        # local trial-division primality test (helper name lost in mangling)
        if num <= 1:
            return False
        return all(num % d for d in range(2 , int(round(sqrt(num ) ) ) + 1 ))

    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = [p for p in range(2 , number + 1 ) if _is_prime(p )]
    len_pn = len(prime_numbers )
    i = 0
    loop = True  # exit variable. for break up the loops
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and _is_prime(ans[0] )
        and _is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def A_ ( number_a , number_b ) -> int:
    """Greatest common divisor of two non-negative integers (Euclid's algorithm).

    NOTE(review): the original signature had two identically named parameters
    (a SyntaxError); they are restored as distinct positional names.
    """
    assert (
        isinstance(number_a , int )
        and isinstance(number_b , int )
        and (number_a >= 0)
        and (number_b >= 0)
    ), "'number1' and 'number2' must been positive integer."
    while number_b != 0:
        rest = number_a % number_b
        number_a = number_b
        number_b = rest
    # precondition
    assert isinstance(number_a , int ) and (
        number_a >= 0
    ), "'number' must been from type int and positive"
    return number_a
def A_ ( number_a , number_b ) -> int:
    """Least common multiple (kgV) of two positive integers.

    Implemented as a // gcd(a, b) * b, which equals the original
    prime-factorisation merge for all inputs >= 1.
    NOTE(review): the original signature had two identically named parameters
    (a SyntaxError); they are restored as distinct positional names.
    """
    assert (
        isinstance(number_a , int )
        and isinstance(number_b , int )
        and (number_a >= 1)
        and (number_b >= 1)
    ), "'number1' and 'number2' must been positive integer."

    def _gcd(a , b ):
        # Euclid's algorithm, kept local so the block is self-contained
        while b:
            a , b = b , a % b
        return a

    ans = number_a // _gcd(number_a , number_b ) * number_b
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def A_ ( _lowerCAmelCase ) -> int:
    """Return the n-th prime (0-indexed: n=0 -> 2, n=1 -> 3, ...)."""
    n = _lowerCAmelCase
    assert isinstance(n , int ) and (n >= 0), "'number' must been a positive int"

    def _is_prime(num):
        # local trial-division primality test (helper name lost in mangling)
        if num <= 1:
            return False
        return all(num % d for d in range(2 , int(round(sqrt(num ) ) ) + 1 ))

    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not _is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and _is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def A_ ( p_number_a , p_number_b ) -> list:
    """All primes strictly between two primes p_number_a < p_number_b.

    NOTE(review): the original signature had two identically named parameters
    (a SyntaxError); they are restored as distinct positional names.
    """

    def _is_prime(num):
        # local trial-division primality test (helper name lost in mangling)
        if num <= 1:
            return False
        return all(num % d for d in range(2 , int(round(sqrt(num ) ) ) + 1 ))

    assert (
        _is_prime(p_number_a ) and _is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then fetch the next prime number.
    while not _is_prime(number ):
        number += 1
    while number < p_number_b:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not _is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_a
        and ans[len(ans ) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def A_ ( _lowerCAmelCase ) -> list:
    """Return all divisors of n >= 1 in ascending order (including 1 and n)."""
    n = _lowerCAmelCase
    assert isinstance(n , int ) and (n >= 1), "'n' must been int and >= 1"
    ans = [divisor for divisor in range(1 , n + 1 ) if n % divisor == 0]
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function getDivisiors(...)"
    return ans
def A_ ( _lowerCAmelCase ) -> bool:
    """True iff n > 1 is a perfect number (sum of its proper divisors equals n)."""
    number = _lowerCAmelCase
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must been an int and >= 1"
    # divisors in ascending order, including 'number' itself (helper inlined)
    divisors = [d for d in range(1 , number + 1 ) if number % d == 0]
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def A_ ( numerator , denominator ) -> tuple:
    """Reduce numerator/denominator by their gcd; returns the simplified pair.

    NOTE(review): the original signature had two identically named parameters
    (a SyntaxError); they are restored as distinct positional names.
    """
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    def _gcd(a , b ):
        # Euclid's algorithm, kept local so the block is self-contained
        while b:
            a , b = b , a % b
        return a

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = _gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def A_ ( _lowerCAmelCase ) -> int:
    """Iterative factorial of n >= 0 (0! == 1)."""
    n = _lowerCAmelCase
    assert isinstance(n , int ) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans
def A_ ( _lowerCAmelCase ) -> int:
    """Iterative Fibonacci with the original's indexing: f(0)=f(1)=f(2)=1, f(5)=5."""
    n = _lowerCAmelCase
    assert isinstance(n , int ) and (n >= 0), "'n' must been an int and >= 0"
    fib_prev = 0
    ans = 1  # this will be return
    # n-1 steps of (prev, cur) -> (cur, cur+prev), matching the original loop
    for _ in range(n - 1 ):
        fib_prev , ans = ans , ans + fib_prev
    return ans
| 52
| 1
|
class A__ :
    # Maximum-flow network: adjacency-matrix `graph` plus source/sink indices
    # and a pluggable maximum-flow algorithm executor.
    # NOTE(review): assignment targets throughout were mangled to
    # `UpperCamelCase` while later code reads attributes such as `self.graph`,
    # `self.source_index`, `self.maximum_flow_algorithm`; duplicate `A_`
    # parameters also make the signatures SyntaxErrors. Reconcile with the
    # original TheAlgorithms maximum-flow implementation before use.
    def __init__( self , A_ , A_ , A_ ):
        """Store the graph and normalise multiple sources/sinks.

        NOTE(review): original signature was (graph, sources, sinks).
        """
        UpperCamelCase : Optional[Any] = None
        UpperCamelCase : int = None
        UpperCamelCase : Optional[int] = graph
        self._normalize_graph(A_ , A_ )
        UpperCamelCase : Tuple = len(A_ )
        UpperCamelCase : str = None
    def __UpperCamelCase( self , A_ , A_ ):
        """Collapse multiple sources/sinks into a single fake source/sink vertex."""
        if sources is int:
            UpperCamelCase : int = [sources]
        if sinks is int:
            UpperCamelCase : Union[str, Any] = [sinks]
        if len(A_ ) == 0 or len(A_ ) == 0:
            return
        UpperCamelCase : Any = sources[0]
        UpperCamelCase : Union[str, Any] = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(A_ ) > 1 or len(A_ ) > 1:
            UpperCamelCase : Any = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            UpperCamelCase : List[Any] = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                UpperCamelCase : Union[str, Any] = max_input_flow
                UpperCamelCase : str = 0
            UpperCamelCase : List[Any] = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                UpperCamelCase : List[Any] = max_input_flow
                UpperCamelCase : Optional[Any] = size - 1
    def __UpperCamelCase( self ):
        """Run the configured algorithm and return the maximum flow (0 if no source/sink)."""
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before." )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def __UpperCamelCase( self ,  A_ ):
        """Instantiate the given executor class over this network (set_maximum_flow_algorithm)."""
        UpperCamelCase : Optional[int] = algorithm(self )
class A__ :
    # Base class for flow-network algorithm executors: caches network metadata
    # and runs `_algorithm()` exactly once on `execute()`.
    # NOTE(review): assignment targets were mangled to `UpperCamelCase`; later
    # code reads `self.executed`, so the originals were instance attributes.
    def __init__( self , A_ ):
        """Capture the flow network and its vertex/source/sink metadata."""
        UpperCamelCase : Dict = flow_network
        UpperCamelCase : str = flow_network.verticesCount
        UpperCamelCase : Dict = flow_network.sourceIndex
        UpperCamelCase : Optional[Any] = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        UpperCamelCase : Union[str, Any] = flow_network.graph
        UpperCamelCase : Optional[int] = False
    def __UpperCamelCase( self ):
        """Run the algorithm once; subsequent calls are no-ops."""
        if not self.executed:
            self._algorithm()
            UpperCamelCase : Dict = True
    def __UpperCamelCase( self ):
        """Algorithm body — overridden by concrete executors."""
        pass
class A__ ( __snake_case ):
    # Executor specialisation that records a maximum-flow result and exposes it
    # via getMaximumFlow() after execute() has run.
    def __init__( self , A_ ):
        """Initialise with the flow network; the result starts as -1 (unset)."""
        super().__init__(A_ )
        # use this to save your result
        UpperCamelCase : List[Any] = -1
    def __UpperCamelCase( self ):
        """Return the computed maximum flow; raises if execute() was not called."""
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!" )
        return self.maximum_flow
class A__ ( __snake_case ):
    # Push-relabel (relabel-to-front) maximum-flow executor operating on the
    # preflow/excess/height arrays.
    # NOTE(review): assignment targets were mangled to `UpperCamelCase`; later
    # code reads `self.preflow`, `self.heights`, `self.excesses` — restore
    # those attribute names before running.
    def __init__( self , A_ ):
        """Allocate preflow matrix plus per-vertex height and excess arrays."""
        super().__init__(A_ )
        UpperCamelCase : Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )]
        UpperCamelCase : str = [0] * self.verticies_count
        UpperCamelCase : Dict = [0] * self.verticies_count
    def __UpperCamelCase( self ):
        """Relabel-to-front main loop; stores the resulting flow value."""
        UpperCamelCase : Union[str, Any] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        UpperCamelCase : int = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        UpperCamelCase : int = 0
        while i < len(A_ ):
            UpperCamelCase : List[str] = vertices_list[i]
            UpperCamelCase : str = self.heights[vertex_index]
            self.process_vertex(A_ )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(A_ ) )
                UpperCamelCase : Optional[int] = 0
            else:
                i += 1
        UpperCamelCase : Dict = sum(self.preflow[self.source_index] )
    def __UpperCamelCase( self , A_ ):
        """Discharge a vertex: push to eligible neighbours, then relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(A_ , A_ )
            self.relabel(A_ )
    def __UpperCamelCase( self , A_ , A_ ):
        """Push min(excess, residual capacity) along an edge and update excesses."""
        UpperCamelCase : Dict = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def __UpperCamelCase( self , A_ ):
        """Raise a vertex's height to one above its lowest admissible neighbour."""
        UpperCamelCase : Any = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                UpperCamelCase : Tuple = self.heights[to_index]
        if min_height is not None:
            UpperCamelCase : Optional[Any] = min_height + 1
if __name__ == "__main__":
    # Demo: a 4-vertex network with source 0 and sink 3, solved by push-relabel.
    # NOTE(review): the bindings below target the mangled name
    # `__lowerCamelCase`, but later lines read `graph`, `entrances`, `exits`,
    # `flow_network` and `maximum_flow` — restore consistent names to run.
    __lowerCamelCase : Optional[Any] = [0]
    __lowerCamelCase : str = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    __lowerCamelCase : Union[str, Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    __lowerCamelCase : Dict = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    __lowerCamelCase : int = flow_network.find_maximum_flow()
    print(f"""maximum flow is {maximum_flow}""")
| 52
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# NOTE(review): every constant below is bound to the mangled name
# `__lowerCamelCase`, but later code reads `PATH_TO_TRANSFORMERS`,
# `transformers`, `CONFIG_MAPPING`, `_re_checkpoint` and
# `CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK` — restore those
# names for the script to run.
__lowerCamelCase : str = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__lowerCamelCase : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCamelCase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__lowerCamelCase : Optional[Any] = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
# Config classes exempt from the docstring-checkpoint check.
__lowerCamelCase : List[str] = {
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}
def A_ ( _lowerCAmelCase ) -> str:
    """Return the checkpoint name referenced in a config class's source, or None.

    Scans the class source for markdown links of the form
    ``[name](https://huggingface.co/name)`` and returns the first ``name``
    whose link target matches ``https://huggingface.co/<name>`` exactly.

    NOTE(review): the regex is compiled locally because the module-level
    pattern was bound to a mangled name; the original also left ``checkpoint``
    unbound when no link matched — both restored here.
    """
    config_class = _lowerCAmelCase
    checkpoint = None  # returned unchanged when no matching link is found
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoint_pattern = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""" )
    checkpoints = checkpoint_pattern.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/" ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        if ckpt_link == F"""https://huggingface.co/{ckpt_name}""":
            checkpoint = ckpt_name
            break
    return checkpoint
def A_ ( ) -> None:
    """Raise ValueError listing config classes whose docstring lacks a valid checkpoint.

    Deprecated models and classes in the ignore set are skipped.
    NOTE(review): `CONFIG_MAPPING`, the ignore set and the helper
    `get_checkpoint_from_config_class` are module-level names that were mangled
    in this file — restore their bindings for this check to run.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 52
| 1
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__lowerCamelCase : Dict = logging.getLogger(__name__)
class A__ ( __snake_case ):
def __UpperCamelCase( self , A_ , A_ , A_=None , A_=None ):
'''simple docstring'''
UpperCamelCase : Any = self.layer[current_layer](A_ , A_ , head_mask[current_layer] )
UpperCamelCase : List[str] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __snake_case , )
class A__ ( __snake_case ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase : Optional[int] = BertEncoderWithPabee(A_ )
self.init_weights()
UpperCamelCase : int = 0
UpperCamelCase : Tuple = 0
UpperCamelCase : Union[str, Any] = 0
UpperCamelCase : str = 0
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : int = threshold
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = patience
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = 0
UpperCamelCase : List[str] = 0
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.inference_layers_num / self.inference_instances_num
UpperCamelCase : Optional[int] = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(A_ )
@add_start_docstrings_to_model_forward(A_ )
def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCamelCase : Optional[int] = input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCamelCase : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase : Optional[int] = torch.ones(A_ , device=A_ )
if token_type_ids is None:
UpperCamelCase : Optional[int] = torch.zeros(A_ , dtype=torch.long , device=A_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase : torch.Tensor = self.get_extended_attention_mask(A_ , A_ , A_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = encoder_hidden_states.size()
UpperCamelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCamelCase : Union[str, Any] = torch.ones(A_ , device=A_ )
UpperCamelCase : Any = self.invert_attention_mask(A_ )
else:
UpperCamelCase : Optional[int] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase : Any = self.get_head_mask(A_ , self.config.num_hidden_layers )
UpperCamelCase : Tuple = self.embeddings(
input_ids=A_ , position_ids=A_ , token_type_ids=A_ , inputs_embeds=A_ )
UpperCamelCase : Union[str, Any] = embedding_output
if self.training:
UpperCamelCase : List[str] = []
for i in range(self.config.num_hidden_layers ):
UpperCamelCase : Optional[Any] = self.encoder.adaptive_forward(
A_ , current_layer=A_ , attention_mask=A_ , head_mask=A_ )
UpperCamelCase : Optional[Any] = self.pooler(A_ )
UpperCamelCase : List[str] = output_layers[i](output_dropout(A_ ) )
res.append(A_ )
elif self.patience == 0: # Use all layers for inference
UpperCamelCase : Optional[int] = self.encoder(
A_ , attention_mask=A_ , head_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
UpperCamelCase : Any = self.pooler(encoder_outputs[0] )
UpperCamelCase : int = [output_layers[self.config.num_hidden_layers - 1](A_ )]
else:
UpperCamelCase : Any = 0
UpperCamelCase : Tuple = None
UpperCamelCase : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCamelCase : Any = self.encoder.adaptive_forward(
A_ , current_layer=A_ , attention_mask=A_ , head_mask=A_ )
UpperCamelCase : Dict = self.pooler(A_ )
UpperCamelCase : Optional[Any] = output_layers[i](A_ )
if regression:
UpperCamelCase : int = logits.detach()
if patient_result is not None:
UpperCamelCase : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCamelCase : Tuple = 0
else:
UpperCamelCase : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCamelCase : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(A_ ) ):
patient_counter += 1
else:
UpperCamelCase : Dict = 0
UpperCamelCase : Optional[Any] = logits
if patient_counter == self.patience:
break
UpperCamelCase : List[Any] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __snake_case , )
class A__ ( __snake_case ):
def __init__( self , A_ ):
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase : Tuple = config.num_labels
UpperCamelCase : Union[str, Any] = BertModelWithPabee(A_ )
UpperCamelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase : Optional[Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(A_ )
def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , ):
'''simple docstring'''
UpperCamelCase : Dict = self.bert(
input_ids=A_ , attention_mask=A_ , token_type_ids=A_ , position_ids=A_ , head_mask=A_ , inputs_embeds=A_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCamelCase : Union[str, Any] = (logits[-1],)
if labels is not None:
UpperCamelCase : Tuple = None
UpperCamelCase : Tuple = 0
for ix, logits_item in enumerate(A_ ):
if self.num_labels == 1:
# We are doing regression
UpperCamelCase : List[Any] = MSELoss()
UpperCamelCase : Optional[Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase : Any = CrossEntropyLoss()
UpperCamelCase : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCamelCase : Dict = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCamelCase : List[Any] = (total_loss / total_weights,) + outputs
return outputs
| 52
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
# Module-level sieve: build the set of primes below 100 used by partition().
# NOTE(review): the bindings below target the mangled name `__lowerCamelCase`,
# while later lines read `NUM_PRIMES` and `primes` — restore those names
# (NUM_PRIMES = 100; primes = the odd candidates plus 2) for this to run.
__lowerCamelCase : str = 100
__lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
__lowerCamelCase : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def A_ ( _lowerCAmelCase ) -> set[int]:
    """Set of products of parts over all partitions of n into primes below 100.

    Negative n yields the empty set; n == 0 yields {1} (the empty product).
    NOTE(review): the prime pool is rebuilt locally because the module-level
    `primes` binding was mangled, and the recursion calls this function by its
    actual name. Callers must not mutate the returned set (it is cached).
    """
    number_to_partition = _lowerCAmelCase
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    # primes below 100, matching the module-level sieve's intent
    prime_pool = [p for p in range(2 , 100 ) if all(p % d for d in range(2 , int(p**0.5 ) + 1 ) )]
    ret: set[int] = set()
    for prime in prime_pool:
        if prime > number_to_partition:
            continue
        for sub in A_(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret
def A_ ( _lowerCAmelCase = 5000 ) -> int | None:
for number_to_partition in range(1 , _lowerCAmelCase ):
if len(partition(_lowerCAmelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 52
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :Union[str, Any] = AltDiffusionPipeline
_UpperCAmelCase :Any = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :Any = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    def __UpperCamelCase( self ):
        """Build tiny, seeded pipeline components (unet/scheduler/vae/text encoder/tokenizer)
        for fast AltDiffusion tests.

        NOTE(review): several components are bound to the mangled name
        `UpperCamelCase` but read later as `scheduler`, `vae`, `text_encoder`,
        `tokenizer` — restore those names for the dict below to resolve.
        """
        torch.manual_seed(0 )
        UpperCamelCase : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        UpperCamelCase : int = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=A_ , set_alpha_to_one=A_ , )
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        UpperCamelCase : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        UpperCamelCase : Dict = CLIPTextModel(A_ )
        UpperCamelCase : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        UpperCamelCase : Dict = 77
        UpperCamelCase : List[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
def __UpperCamelCase( self , A_ , seed=0 ):
    """Return deterministic call kwargs for the pipeline under test.

    Args:
        A_: target device (string or ``torch.device``).  "mps" devices get a
            CPU-seeded generator because ``torch.Generator`` does not
            support that backend.
        seed: RNG seed for the generator (default 0).

    Returns:
        dict: kwargs (prompt, generator, steps, guidance, output type)
        ready to be splatted into the pipeline call.
    """
    # Fix(review): the original signature declared *two* parameters named
    # ``A_`` (a SyntaxError in Python) and never bound the ``generator``
    # name used in the returned dict.  The second parameter is renamed to
    # ``seed`` and the generator is bound explicitly.
    if str(A_).startswith("mps"):
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=A_).manual_seed(seed)
    inputs = {
        "prompt": "A painting of a squirrel eating a burger",
        "generator": generator,
        "num_inference_steps": 2,
        "guidance_scale": 6.0,
        "output_type": "numpy",
    }
    return inputs
def __UpperCamelCase( self ):
    """Attention slicing must reproduce the regular forward pass within tolerance."""
    max_allowed_diff = 3e-3
    super().test_attention_slicing_forward_pass(expected_max_diff=max_allowed_diff)
def __UpperCamelCase( self ):
    """A batch of one must match single-sample inference within tolerance."""
    max_allowed_diff = 3e-3
    super().test_inference_batch_single_identical(expected_max_diff=max_allowed_diff)
def __UpperCamelCase( self ):
    """End-to-end CPU run of AltDiffusionPipeline with a RobertaSeries text
    encoder swapped in; checks the image shape and a 3x3 corner slice
    against golden pixel values.

    NOTE(review): several names in this body look like decompilation
    artifacts — ``self.get_dummy_components`` / ``self.get_dummy_inputs``
    are not defined under those names in this chunk, repeated assignments
    all target ``UpperCamelCase`` so ``text_encoder``/``alt_pipe``/
    ``output``/``image_slice`` are never bound, and ``A_`` resolves only to
    an unrelated module-level helper.  Code is left byte-for-byte as found;
    only comments were added.
    """
    UpperCamelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
    UpperCamelCase : Tuple = self.get_dummy_components()
    torch.manual_seed(0)
    # Tiny deterministic RobertaSeries text-encoder config.
    UpperCamelCase : Optional[int] = RobertaSeriesConfig(
        hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
    # TODO: remove after fixing the non-deterministic text encoder
    UpperCamelCase : List[Any] = RobertaSeriesModelWithTransformation(A_ )
    UpperCamelCase : List[Any] = text_encoder
    UpperCamelCase : str = AltDiffusionPipeline(**A_ )
    UpperCamelCase : Optional[Any] = alt_pipe.to(A_ )
    alt_pipe.set_progress_bar_config(disable=A_ )
    UpperCamelCase : List[str] = self.get_dummy_inputs(A_ )
    UpperCamelCase : Tuple = "A photo of an astronaut"
    UpperCamelCase : Dict = alt_pipe(**A_ )
    UpperCamelCase : int = output.images
    # Bottom-right 3x3 patch of the last channel dimension.
    UpperCamelCase : Optional[int] = image[0, -3:, -3:, -1]
    assert image.shape == (1, 64, 64, 3)
    # Golden pixel values for the sliced region (tolerance 1e-2 below).
    UpperCamelCase : int = np.array(
        [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase( self ):
    """Same end-to-end CPU run as above but with a PNDM scheduler
    (``skip_prk_steps`` enabled); checks shape and a golden 3x3 slice.

    NOTE(review): this body has the same decompilation damage as the
    previous test — method names that don't exist under those names here,
    every assignment targeting ``UpperCamelCase``, and unresolved ``A_``
    placeholders.  Code is left byte-for-byte as found; comments only.
    """
    UpperCamelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
    UpperCamelCase : int = self.get_dummy_components()
    # Swap the default scheduler for PNDM.
    UpperCamelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=A_ )
    torch.manual_seed(0)
    UpperCamelCase : str = RobertaSeriesConfig(
        hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
    # TODO: remove after fixing the non-deterministic text encoder
    UpperCamelCase : Optional[int] = RobertaSeriesModelWithTransformation(A_ )
    UpperCamelCase : List[Any] = text_encoder
    UpperCamelCase : Dict = AltDiffusionPipeline(**A_ )
    UpperCamelCase : Optional[Any] = alt_pipe.to(A_ )
    alt_pipe.set_progress_bar_config(disable=A_ )
    UpperCamelCase : Any = self.get_dummy_inputs(A_ )
    UpperCamelCase : Optional[int] = alt_pipe(**A_ )
    UpperCamelCase : Dict = output.images
    # Bottom-right 3x3 patch of the last channel dimension.
    UpperCamelCase : Dict = image[0, -3:, -3:, -1]
    assert image.shape == (1, 64, 64, 3)
    # Golden pixel values for the PNDM run (tolerance 1e-2 below).
    UpperCamelCase : Optional[Any] = np.array(
        [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow, GPU-only integration tests against real "BAAI/AltDiffusion" weights.

    NOTE(review): all three methods share the (name-mangled) identifier
    ``__UpperCamelCase`` — later definitions shadow earlier ones — and
    ``A_`` used as a device/flag argument is unresolved in this chunk
    (upstream it is ``torch_device`` / ``None``).  Code kept byte-identical;
    comments only.
    """

    def __UpperCamelCase( self ):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase( self ):
        """512x512 generation with the default scheduler; golden-slice check."""
        UpperCamelCase : str = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=A_ )
        UpperCamelCase : Union[str, Any] = alt_pipe.to(A_ )
        alt_pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : Optional[Any] = "A painting of a squirrel eating a burger"
        UpperCamelCase : Union[str, Any] = torch.manual_seed(0 )
        UpperCamelCase : Optional[Any] = alt_pipe([prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
        UpperCamelCase : Optional[Any] = output.images
        # Bottom-right 3x3 patch of the last channel dimension.
        UpperCamelCase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        # Golden pixel values (tolerance 1e-2 below).
        UpperCamelCase : List[str] = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCamelCase( self ):
        """Generation with an explicitly loaded DDIM scheduler; golden-slice check."""
        UpperCamelCase : Dict = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
        UpperCamelCase : Optional[Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=A_ , safety_checker=A_ )
        UpperCamelCase : Optional[int] = alt_pipe.to(A_ )
        alt_pipe.set_progress_bar_config(disable=A_ )
        UpperCamelCase : int = "A painting of a squirrel eating a burger"
        UpperCamelCase : int = torch.manual_seed(0 )
        UpperCamelCase : Dict = alt_pipe([prompt] , generator=A_ , num_inference_steps=2 , output_type="numpy" )
        UpperCamelCase : Union[str, Any] = output.images
        # Bottom-right 3x3 patch of the last channel dimension.
        UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        # Golden pixel values for the 2-step DDIM run (tolerance 1e-2 below).
        UpperCamelCase : List[str] = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# (removed: dataset-viewer table residue accidentally captured in the source)
def A_ ( _lowerCAmelCase ) -> str:
    """Return the binary digits of a non-negative integer, without a "0b" prefix.

    Args:
        _lowerCAmelCase: a non-negative integer (or anything ``int()`` accepts).

    Returns:
        str: the binary representation, e.g. ``A_(10) == "1010"``.
    """
    decimal = int(_lowerCAmelCase )
    if decimal in (0, 1):  # base case: a single binary digit
        return str(decimal )
    # Fix(review): the original recursed through the name ``binary_recursive``
    # which was never defined in this file (NameError at call time), and its
    # ``a , b : Dict = divmod(...)`` annotated tuple target was a SyntaxError.
    # Recurse through the stable alias bound below so the recursion keeps
    # working even after a later ``def A_`` rebinds this name.
    quotient, remainder = divmod(decimal , 2 )
    return binary_recursive(quotient ) + str(remainder )


# The converter defined later in this file calls the helper by this name;
# binding the alias here repairs that dangling reference.
binary_recursive = A_
def A_ ( _lowerCAmelCase ) -> str:
    """Convert an integer (or numeric string) to a "0b"-prefixed binary string.

    Args:
        _lowerCAmelCase: value convertible to ``str``; an optional leading
            "-" and surrounding whitespace are accepted.

    Returns:
        str: e.g. ``"0b1100"`` for 12, ``"-0b101"`` for -5.

    Raises:
        ValueError: if the input is empty or not an integer.
    """
    number = str(_lowerCAmelCase ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    # Fix(review): the original formatted ``binary_recursive(int(_lowerCAmelCase))``,
    # which (a) referenced a name never defined in this file and (b) passed the
    # *unstripped* value, so a negative input would recurse on a negative
    # number and never reach the base case.  ``bin()`` on the sign-stripped
    # value is equivalent and safe.
    return F"""{negative}0b{bin(int(number ) )[2:]}"""
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# (removed: HuggingFace dataset-viewer UI residue — "Subsets and Splits" /
# "No community queries yet" — accidentally captured at the end of the file)