code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
def __lowerCamelCase ( UpperCamelCase__ = 4000000 ): '''simple docstring''' snake_case_ = [] snake_case_ = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(UpperCamelCase__ ) snake_case_ = b, a + b return sum(UpperCamelCase__ ) if __name__ == "__main__": print(F'''{solution() = }''')
285
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowerCAmelCase__ : str = set() # Replace all the whitespace in our sentence lowerCAmelCase__ : Tuple = input_str.replace(""" """ , """""" ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(UpperCamelCase ) == 26 def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowerCAmelCase__ : Any = [False] * 26 for char in input_str: if char.islower(): lowerCAmelCase__ : Optional[Any] = True elif char.isupper(): lowerCAmelCase__ : Any = True return all(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" from timeit import timeit lowerCAmelCase__ : Union[str, Any] = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest""" print(timeit("""is_pangram()""" , setup=UpperCamelCase ) ) print(timeit("""is_pangram_faster()""" , setup=UpperCamelCase ) ) print(timeit("""is_pangram_fastest()""" , setup=UpperCamelCase ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
37
0
from __future__ import annotations def A__ ( __lowerCamelCase, __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = position SCREAMING_SNAKE_CASE_ = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] SCREAMING_SNAKE_CASE_ = [] for position in positions: SCREAMING_SNAKE_CASE_ = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(__lowerCamelCase ) return permissible_positions def A__ ( __lowerCamelCase ): return not any(elem == 0 for row in board for elem in row ) def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if is_complete(__lowerCamelCase ): return True for position in get_valid_pos(__lowerCamelCase, len(__lowerCamelCase ) ): SCREAMING_SNAKE_CASE_ = position if board[y][x] == 0: SCREAMING_SNAKE_CASE_ = curr + 1 if open_knight_tour_helper(__lowerCamelCase, __lowerCamelCase, curr + 1 ): return True SCREAMING_SNAKE_CASE_ = 0 return False def A__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ = [[0 for i in range(__lowerCamelCase )] for j in range(__lowerCamelCase )] for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): SCREAMING_SNAKE_CASE_ = 1 if open_knight_tour_helper(__lowerCamelCase, (i, j), 1 ): return board SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(__lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
299
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Tuple = abs(UpperCamelCase ) lowerCAmelCase__ : List[Any] = 0 while n > 0: res += n % 10 n //= 10 return res def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = abs(UpperCamelCase ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" return sum(int(UpperCamelCase ) for c in str(abs(UpperCamelCase ) ) ) def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" from collections.abc import Callable from timeit import timeit def benchmark_a_function(UpperCamelCase , UpperCamelCase ) -> None: lowerCAmelCase__ : str = f"""{func.__name__}({value})""" lowerCAmelCase__ : str = timeit(f"""__main__.{call}""" , setup="""import __main__""" ) print(f"""{call:56} = {func(UpperCamelCase )} -- {timing:.4f} seconds""" ) for value in (262144, 1125899906842624, 1267650600228229401496703205376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(UpperCamelCase , UpperCamelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
37
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ : Dict = logging.get_logger(__name__) a_ : str = { """facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""", """facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class snake_case ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _lowerCamelCase = '''xlm-roberta-xl''' def __init__( self , UpperCamelCase=25_0880 , UpperCamelCase=2560 , UpperCamelCase=36 , UpperCamelCase=32 , UpperCamelCase=1_0240 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=514 , UpperCamelCase=1 , UpperCamelCase=0.02 , UpperCamelCase=1e-05 , UpperCamelCase=1 , UpperCamelCase=0 , UpperCamelCase=2 , UpperCamelCase="absolute" , UpperCamelCase=True , UpperCamelCase=None , **UpperCamelCase , ): """simple docstring""" super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = hidden_act lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = initializer_range lowerCamelCase_ = layer_norm_eps lowerCamelCase_ = position_embedding_type lowerCamelCase_ = use_cache lowerCamelCase_ = classifier_dropout class snake_case ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def snake_case ( self ): """simple docstring""" if self.task == "multiple-choice": lowerCamelCase_ = {0: """batch""", 1: 
"""choice""", 2: """sequence"""} else: lowerCamelCase_ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
55
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers _lowerCAmelCase = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
37
0
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : Optional[Any] = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def lowerCamelCase_ (UpperCamelCase__ : Tuple = 100 ): _UpperCAmelCase : Optional[int] = 1 _UpperCAmelCase : List[str] = 2 for i in range(2 , max_n + 1 ): _UpperCAmelCase : Dict = pre_numerator _UpperCAmelCase : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1 _UpperCAmelCase : Tuple = cur_numerator _UpperCAmelCase : Optional[Any] = e_cont * pre_numerator + temp return sum_digits(UpperCamelCase__ ) if __name__ == "__main__": print(f"{solution() = }")
263
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''', '''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''', # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : int = '''xlm-roberta-xl''' def __init__( self ,__UpperCAmelCase=25_0880 ,__UpperCAmelCase=2560 ,__UpperCAmelCase=36 ,__UpperCAmelCase=32 ,__UpperCAmelCase=1_0240 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=514 ,__UpperCAmelCase=1 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-05 ,__UpperCAmelCase=1 ,__UpperCAmelCase=0 ,__UpperCAmelCase=2 ,__UpperCAmelCase="absolute" ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> str: super().__init__(pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : int = hidden_size lowerCAmelCase__ : int = num_hidden_layers lowerCAmelCase__ : str = num_attention_heads lowerCAmelCase__ : int = hidden_act lowerCAmelCase__ : Dict = intermediate_size lowerCAmelCase__ : List[Any] = hidden_dropout_prob lowerCAmelCase__ : str = attention_probs_dropout_prob lowerCAmelCase__ : Optional[int] = max_position_embeddings lowerCAmelCase__ : List[str] = type_vocab_size lowerCAmelCase__ : List[Any] = initializer_range lowerCAmelCase__ : Tuple = layer_norm_eps lowerCAmelCase__ : int = position_embedding_type lowerCAmelCase__ : Optional[Any] = use_cache lowerCAmelCase__ : Optional[Any] = classifier_dropout class lowerCAmelCase_( 
SCREAMING_SNAKE_CASE_ ): '''simple docstring''' @property def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase__ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowerCAmelCase__ : Any = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
37
0
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class __lowercase ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" UpperCamelCase : torch.FloatTensor class __lowercase ( nn.Module ): """simple docstring""" def __init__( self , A=3 , A=3 , A=("DownEncoderBlock2D",) , A=(64,) , A=2 , A=32 , A="silu" , A=True , ) -> Optional[int]: '''simple docstring''' super().__init__() lowerCamelCase = layers_per_block lowerCamelCase = torch.nn.Convad( __UpperCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowerCamelCase = None lowerCamelCase = nn.ModuleList([] ) # down lowerCamelCase = block_out_channels[0] for i, down_block_type in enumerate(__UpperCAmelCase ): lowerCamelCase = output_channel lowerCamelCase = block_out_channels[i] lowerCamelCase = i == len(__UpperCAmelCase ) - 1 lowerCamelCase = get_down_block( __UpperCAmelCase , num_layers=self.layers_per_block , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , ) self.down_blocks.append(__UpperCAmelCase ) # mid lowerCamelCase = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , ) # out lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__UpperCAmelCase , eps=1e-6 ) lowerCamelCase = nn.SiLU() lowerCamelCase = 2 * out_channels if double_z else out_channels lowerCamelCase = 
nn.Convad(block_out_channels[-1] , __UpperCAmelCase , 3 , padding=1 ) lowerCamelCase = False def __A ( self , A ) -> Optional[Any]: '''simple docstring''' lowerCamelCase = x lowerCamelCase = self.conv_in(__UpperCAmelCase ) if self.training and self.gradient_checkpointing: def create_custom_forward(A ): def custom_forward(*A ): return module(*__UpperCAmelCase ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: lowerCamelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase ) # middle lowerCamelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __UpperCAmelCase , use_reentrant=__UpperCAmelCase ) else: for down_block in self.down_blocks: lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase ) # middle lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __UpperCAmelCase ) else: # down for down_block in self.down_blocks: lowerCamelCase = down_block(__UpperCAmelCase ) # middle lowerCamelCase = self.mid_block(__UpperCAmelCase ) # post-process lowerCamelCase = self.conv_norm_out(__UpperCAmelCase ) lowerCamelCase = self.conv_act(__UpperCAmelCase ) lowerCamelCase = self.conv_out(__UpperCAmelCase ) return sample class __lowercase ( nn.Module ): """simple docstring""" def __init__( self , A=3 , A=3 , A=("UpDecoderBlock2D",) , A=(64,) , A=2 , A=32 , A="silu" , A="group" , ) -> List[Any]: '''simple docstring''' super().__init__() lowerCamelCase = layers_per_block lowerCamelCase = nn.Convad( __UpperCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) lowerCamelCase = None lowerCamelCase = nn.ModuleList([] ) lowerCamelCase = in_channels if norm_type == """spatial""" else None # mid lowerCamelCase = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , 
resnet_act_fn=__UpperCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__UpperCAmelCase , temb_channels=__UpperCAmelCase , ) # up lowerCamelCase = list(reversed(__UpperCAmelCase ) ) lowerCamelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(__UpperCAmelCase ): lowerCamelCase = output_channel lowerCamelCase = reversed_block_out_channels[i] lowerCamelCase = i == len(__UpperCAmelCase ) - 1 lowerCamelCase = get_up_block( __UpperCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , prev_output_channel=__UpperCAmelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__UpperCAmelCase , resnet_groups=__UpperCAmelCase , attention_head_dim=__UpperCAmelCase , temb_channels=__UpperCAmelCase , resnet_time_scale_shift=__UpperCAmelCase , ) self.up_blocks.append(__UpperCAmelCase ) lowerCamelCase = output_channel # out if norm_type == "spatial": lowerCamelCase = SpatialNorm(block_out_channels[0] , __UpperCAmelCase ) else: lowerCamelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__UpperCAmelCase , eps=1e-6 ) lowerCamelCase = nn.SiLU() lowerCamelCase = nn.Convad(block_out_channels[0] , __UpperCAmelCase , 3 , padding=1 ) lowerCamelCase = False def __A ( self , A , A=None ) -> Optional[Any]: '''simple docstring''' lowerCamelCase = z lowerCamelCase = self.conv_in(__UpperCAmelCase ) lowerCamelCase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(A ): def custom_forward(*A ): return module(*__UpperCAmelCase ) return custom_forward if is_torch_version(""">=""" , """1.11.0""" ): # middle lowerCamelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase ) lowerCamelCase = 
sample.to(__UpperCAmelCase ) # up for up_block in self.up_blocks: lowerCamelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , use_reentrant=__UpperCAmelCase ) else: # middle lowerCamelCase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __UpperCAmelCase , __UpperCAmelCase ) lowerCamelCase = sample.to(__UpperCAmelCase ) # up for up_block in self.up_blocks: lowerCamelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase ) else: # middle lowerCamelCase = self.mid_block(__UpperCAmelCase , __UpperCAmelCase ) lowerCamelCase = sample.to(__UpperCAmelCase ) # up for up_block in self.up_blocks: lowerCamelCase = up_block(__UpperCAmelCase , __UpperCAmelCase ) # post-process if latent_embeds is None: lowerCamelCase = self.conv_norm_out(__UpperCAmelCase ) else: lowerCamelCase = self.conv_norm_out(__UpperCAmelCase , __UpperCAmelCase ) lowerCamelCase = self.conv_act(__UpperCAmelCase ) lowerCamelCase = self.conv_out(__UpperCAmelCase ) return sample class __lowercase ( nn.Module ): """simple docstring""" def __init__( self , A , A , A , A=None , A="random" , A=False , A=True ) -> List[Any]: '''simple docstring''' super().__init__() lowerCamelCase = n_e lowerCamelCase = vq_embed_dim lowerCamelCase = beta lowerCamelCase = legacy lowerCamelCase = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowerCamelCase = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) lowerCamelCase = self.used.shape[0] lowerCamelCase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": lowerCamelCase = self.re_embed lowerCamelCase = self.re_embed + 1 print( F'Remapping {self.n_e} indices to {self.re_embed} indices. ' F'Using {self.unknown_index} for unknown indices.' 
) else: lowerCamelCase = n_e lowerCamelCase = sane_index_shape def __A ( self , A ) -> List[str]: '''simple docstring''' lowerCamelCase = inds.shape assert len(__UpperCAmelCase ) > 1 lowerCamelCase = inds.reshape(ishape[0] , -1 ) lowerCamelCase = self.used.to(__UpperCAmelCase ) lowerCamelCase = (inds[:, :, None] == used[None, None, ...]).long() lowerCamelCase = match.argmax(-1 ) lowerCamelCase = match.sum(2 ) < 1 if self.unknown_index == "random": lowerCamelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowerCamelCase = self.unknown_index return new.reshape(__UpperCAmelCase ) def __A ( self , A ) -> str: '''simple docstring''' lowerCamelCase = inds.shape assert len(__UpperCAmelCase ) > 1 lowerCamelCase = inds.reshape(ishape[0] , -1 ) lowerCamelCase = self.used.to(__UpperCAmelCase ) if self.re_embed > self.used.shape[0]: # extra token lowerCamelCase = 0 # simply set to zero lowerCamelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __UpperCAmelCase ) return back.reshape(__UpperCAmelCase ) def __A ( self , A ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase = z.permute(0 , 2 , 3 , 1 ).contiguous() lowerCamelCase = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowerCamelCase = torch.argmin(torch.cdist(__UpperCAmelCase , self.embedding.weight ) , dim=1 ) lowerCamelCase = self.embedding(__UpperCAmelCase ).view(z.shape ) lowerCamelCase = None lowerCamelCase = None # compute loss for embedding if not self.legacy: lowerCamelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: lowerCamelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowerCamelCase = z + (z_q - z).detach() # reshape back to match original input shape lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowerCamelCase = 
min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowerCamelCase = self.remap_to_used(__UpperCAmelCase ) lowerCamelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowerCamelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def __A ( self , A , A ) -> Tuple: '''simple docstring''' if self.remap is not None: lowerCamelCase = indices.reshape(shape[0] , -1 ) # add batch axis lowerCamelCase = self.unmap_to_all(__UpperCAmelCase ) lowerCamelCase = indices.reshape(-1 ) # flatten again # get quantized latent vectors lowerCamelCase = self.embedding(__UpperCAmelCase ) if shape is not None: lowerCamelCase = z_q.view(__UpperCAmelCase ) # reshape back to match original input shape lowerCamelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class __lowercase ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , A , A=False ) -> Dict: '''simple docstring''' lowerCamelCase = parameters lowerCamelCase = torch.chunk(__UpperCAmelCase , 2 , dim=1 ) lowerCamelCase = torch.clamp(self.logvar , -30.0 , 20.0 ) lowerCamelCase = deterministic lowerCamelCase = torch.exp(0.5 * self.logvar ) lowerCamelCase = torch.exp(self.logvar ) if self.deterministic: lowerCamelCase = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def __A ( self , A = None ) -> torch.FloatTensor: '''simple docstring''' lowerCamelCase = randn_tensor( self.mean.shape , generator=__UpperCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype ) lowerCamelCase = self.mean + self.std * sample return x def __A ( self , A=None ) -> str: '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / 
other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def __A ( self , A , A=[1, 2, 3] ) -> Optional[Any]: '''simple docstring''' if self.deterministic: return torch.Tensor([0.0] ) lowerCamelCase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__UpperCAmelCase ) def __A ( self ) -> Optional[int]: '''simple docstring''' return self.mean
252
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : List[str] = analyze_text(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase__ : List[Any] = sum(single_char_strings.values() ) # one length string lowerCAmelCase__ : Optional[int] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase__ : List[Any] = single_char_strings[ch] lowerCAmelCase__ : List[Any] = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase__ : Dict = sum(two_char_strings.values() ) lowerCAmelCase__ : int = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase__ : Union[str, Any] = cha + cha if sequence in two_char_strings: lowerCAmelCase__ : Dict = two_char_strings[sequence] lowerCAmelCase__ : Tuple = int(UpperCamelCase ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = Counter() # type: ignore lowerCAmelCase__ : Tuple = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. 
two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
37
0
import argparse
import logging
import sys
from unittest.mock import patch

import run_glue_deebert

from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def A():
    """Return the value of the ``-f`` flag (pytest passes it); kept for CLI compatibility."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class __lowerCAmelCase(TestCasePlus):
    """Slow end-to-end tests for the DeeBERT research example.

    Trains a two-stage DeeBERT RoBERTa model on the tiny MRPC fixture, then
    evaluates it per-highway and with an early-exit entropy threshold.
    """

    def setup(self) -> None:
        # Mirror example logs to stdout so pytest captures them.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run ``run_glue_deebert.main()`` with *args* as argv and check all
        returned metrics are >= 0.666."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
339
"""Convert an original latent-diffusion (CompVis-style) UNet checkpoint to diffusers format."""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Remove ``n_shave_prefix_segments`` dot-separated segments from the
    start of *path* (or from the end when the count is negative)."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old resnet parameter names to their diffusers equivalents.

    Returns a list of ``{"old": ..., "new": ...}`` dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old attention parameter names to their diffusers equivalents.

    Returns a list of ``{"old": ..., "new": ...}`` dicts.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy parameters from *old_checkpoint* into *checkpoint* following *paths*.

    ``attention_paths_to_split`` maps fused qkv parameter names to the three
    destination names; those tensors are split into query/key/value.
    ``additional_replacements`` is a list of ``{"old", "new"}`` substring
    replacements applied to each destination path.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Translate a full original LDM UNet state dict into diffusers naming.

    *config* must contain at least ``num_res_blocks`` and ``num_head_channels``.
    Returns the converted state dict.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A bare `op` conv means this input block is a downsampler, not a resnet.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
37
0
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Returns ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train + evaluate bert-base-cased on MRPC, optionally logging to trackers."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse CLI flags and launch :func:`training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
6
"""Basic number-theory helpers (prime tests, factorization, gcd/lcm, Goldbach)."""
from math import sqrt


def is_prime(number):
    """Return True iff ``number`` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    """Return all primes in [2, n] via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n):
    """Return all primes in [2, n] using repeated primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number):
    """Return the prime factorization of ``number`` as a list (0/1 map to themselves)."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number):
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number):
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number):
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number):
    """Return two primes summing to the even ``number`` (Goldbach pair)."""
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1, number2):
    """Return the greatest common divisor of two non-negative ints (Euclid)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"

    return number1


def kg_v(number1, number2):
    """Return the least common multiple of two positive ints via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"

    return ans


def get_prime(n):
    """Return the n-th prime (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    """Return the primes strictly between two primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Return all positive divisors of ``n`` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Return ``(numerator, denominator)`` reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


# NOTE(review): the following definition starts here but its body continues
# beyond this chunk of the file; it is reproduced verbatim (still garbled)
# because it cannot be completed from the visible source.
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
    lowerCAmelCase__ : str = 1  # this will be return.
for factor in range(1 , n + 1 ): ans *= factor return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase__ : List[Any] = 0 lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Optional[Any] = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase__ : Dict = ans ans += fiba lowerCAmelCase__ : str = tmp return ans
37
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> int: """simple docstring""" A__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), 
('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False ) -> List[str]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A__ = """""" else: A__ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) A__ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[ : config.hidden_size, : ] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[ -config.hidden_size :, : ] A__ = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]: """simple docstring""" A__ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase_ , lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = dct.pop(lowercase_ ) A__ = val def SCREAMING_SNAKE_CASE ( ) -> str: """simple docstring""" A__ = 
"""http://images.cocodataset.org/val2017/000000039769.jpg""" A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = ViTConfig() A__ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A__ = True A__ = int(vit_name[-12:-10] ) A__ = int(vit_name[-9:-6] ) else: A__ = 1_000 A__ = """huggingface/label-files""" A__ = """imagenet-1k-id2label.json""" A__ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) ) A__ = {int(lowercase_ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = int(vit_name[-6:-4] ) A__ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): A__ = 192 A__ = 768 A__ = 12 A__ = 3 elif vit_name[9:].startswith('''small''' ): A__ = 384 A__ = 1_536 A__ = 12 A__ = 6 else: pass else: if vit_name[4:].startswith('''small''' ): A__ = 768 A__ = 2_304 A__ = 8 A__ = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): A__ = 1_024 A__ = 4_096 A__ = 24 A__ = 16 elif vit_name[4:].startswith('''huge''' ): A__ = 1_280 A__ = 5_120 A__ = 32 A__ = 16 # load original model from timm A__ = timm.create_model(lowercase_ , pretrained=lowercase_ ) timm_model.eval() # load state_dict of original model, remove and rename some keys A__ = timm_model.state_dict() if base_model: remove_classification_head_(lowercase_ ) A__ = create_rename_keys(lowercase_ , lowercase_ ) for src, dest in rename_keys: rename_key(lowercase_ , lowercase_ , lowercase_ ) read_in_q_k_v(lowercase_ , lowercase_ , lowercase_ ) # load HuggingFace model if vit_name[-5:] == "in21k": A__ = ViTModel(lowercase_ ).eval() else: A__ = ViTForImageClassification(lowercase_ ).eval() model.load_state_dict(lowercase_ ) # Check 
outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A__ = DeiTImageProcessor(size=config.image_size ) else: A__ = ViTImageProcessor(size=config.image_size ) A__ = image_processor(images=prepare_img() , return_tensors='''pt''' ) A__ = encoding["""pixel_values"""] A__ = model(lowercase_ ) if base_model: A__ = timm_model.forward_features(lowercase_ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase_ , outputs.pooler_output , atol=1E-3 ) else: A__ = timm_model(lowercase_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase_ , outputs.logits , atol=1E-3 ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_patch16_224""", type=str, help="""Name of the ViT timm model you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _lowerCamelCase : List[Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
14
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _lowerCAmelCase = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' _lowerCAmelCase = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' _lowerCAmelCase = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. 
Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> 
references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" return float((preds == labels).mean() ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase="binary" ): """simple docstring""" lowerCAmelCase__ : Any = simple_accuracy(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average=UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = {} for id_pred, label in zip(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" lowerCAmelCase__ : Dict = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCAmelCase__ : Optional[int] = [(pred, label)] lowerCAmelCase__ , lowerCAmelCase__ : int = [], [] for question, preds_labels in question_map.items(): lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = zip(*UpperCamelCase ) lowerCAmelCase__ : List[Any] = fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average="""macro""" ) fas.append(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase ) ) ems.append(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = float(sum(UpperCamelCase ) / len(UpperCamelCase ) ) lowerCAmelCase__ : List[Any] = sum(UpperCamelCase ) / len(UpperCamelCase ) 
lowerCAmelCase__ : Dict = float(fa_score(y_true=UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,) def UpperCAmelCase_ ( self ) -> str: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: if self.config_name == "axb": return {"matthews_correlation": 
matthews_corrcoef(__UpperCAmelCase ,__UpperCAmelCase )} elif self.config_name == "cb": return acc_and_fa(__UpperCAmelCase ,__UpperCAmelCase ,fa_avg="""macro""" ) elif self.config_name == "record": lowerCAmelCase__ : Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] lowerCAmelCase__ : Union[str, Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(__UpperCAmelCase ,__UpperCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(__UpperCAmelCase ,__UpperCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__UpperCAmelCase ,__UpperCAmelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
37
0
from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class lowerCamelCase (SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self : Optional[int] , __magic_name__ : Union[str, Any] = None , __magic_name__ : Dict = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = None , __magic_name__ : Dict = False , __magic_name__ : Any = False , __magic_name__ : str = None , **__magic_name__ : Optional[int] , ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = path_or_paths SCREAMING_SNAKE_CASE_ = split if split or isinstance(__UpperCAmelCase , __UpperCAmelCase ) else """train""" SCREAMING_SNAKE_CASE_ = features SCREAMING_SNAKE_CASE_ = cache_dir SCREAMING_SNAKE_CASE_ = keep_in_memory SCREAMING_SNAKE_CASE_ = streaming SCREAMING_SNAKE_CASE_ = num_proc SCREAMING_SNAKE_CASE_ = kwargs @abstractmethod def __A ( self : int ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: pass class lowerCamelCase (SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self : Union[str, Any] , __magic_name__ : Tuple = None , __magic_name__ : Union[str, Any] = None , __magic_name__ : Any = False , __magic_name__ : str = False , __magic_name__ : Optional[Any] = None , **__magic_name__ : Union[str, Any] , ) -> Dict: SCREAMING_SNAKE_CASE_ = features SCREAMING_SNAKE_CASE_ = cache_dir SCREAMING_SNAKE_CASE_ = keep_in_memory SCREAMING_SNAKE_CASE_ = streaming SCREAMING_SNAKE_CASE_ = num_proc SCREAMING_SNAKE_CASE_ = kwargs @abstractmethod def __A ( self : Tuple ) -> Union[Dataset, IterableDataset]: pass
118
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]: return None class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: return None class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' __lowercase : Dict = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def UpperCAmelCase_ ( self ) -> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Any: from transformers import BertModel lowerCAmelCase__ : Optional[int] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__UpperCAmelCase ) ) vocab_file.flush() lowerCAmelCase__ : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCAmelCase__ : Tuple = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) ) model.save_pretrained(__UpperCAmelCase ) 
self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,__UpperCAmelCase ) @require_tf @slow def UpperCAmelCase_ ( self ) -> List[str]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Dict = self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : List[str] = quantize(Path(__UpperCAmelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def UpperCAmelCase_ ( self ) -> List[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Any = self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : Dict = quantize(__UpperCAmelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[Any]: try: # Compute path with TemporaryDirectory() as tempdir: lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) return path except Exception as e: self.fail(__UpperCAmelCase ) @require_torch @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: from transformers import BertModel lowerCAmelCase__ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase 
,__UpperCAmelCase ,"""pt""" ) @require_tf @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Optional[int]: from transformers import TFBertModel lowerCAmelCase__ : int = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""tf""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Any = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase ) # Assert all variables are present self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should 
have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCAmelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCAmelCase ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCAmelCase__ , lowerCAmelCase__ : int = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCAmelCase ) ,1 ) self.assertEqual(len(__UpperCAmelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] ,"""input_ids""" ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Dict = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
37
0
"""simple docstring""" from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def __magic_name__ ( lowercase , lowercase , lowercase ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] SCREAMING_SNAKE_CASE_: Union[str, Any] =(low + high) // 2 SCREAMING_SNAKE_CASE_: Dict =max_subarray(lowercase , lowercase , lowercase ) SCREAMING_SNAKE_CASE_: int =max_subarray(lowercase , mid + 1 , lowercase ) SCREAMING_SNAKE_CASE_: Union[str, Any] =max_cross_sum(lowercase , lowercase , lowercase , lowercase ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ): SCREAMING_SNAKE_CASE_: Any =float("""-inf""" ), -1 SCREAMING_SNAKE_CASE_: str =float("""-inf""" ), -1 SCREAMING_SNAKE_CASE_: int | float =0 for i in range(lowercase , low - 1 , -1 ): summ += arr[i] if summ > left_sum: SCREAMING_SNAKE_CASE_: Optional[int] =summ SCREAMING_SNAKE_CASE_: List[Any] =i SCREAMING_SNAKE_CASE_: Optional[Any] =0 for i in range(mid + 1 , high + 1 ): summ += arr[i] if summ > right_sum: SCREAMING_SNAKE_CASE_: Optional[Any] =summ SCREAMING_SNAKE_CASE_: List[str] =i return max_left, max_right, (left_sum + right_sum) def __magic_name__ ( lowercase ): SCREAMING_SNAKE_CASE_: Optional[Any] =[randint(1 , lowercase ) for _ in range(lowercase )] SCREAMING_SNAKE_CASE_: Union[str, Any] =time.time() max_subarray(lowercase , 0 , input_size - 1 ) SCREAMING_SNAKE_CASE_: str =time.time() return end - start def __magic_name__ ( ): SCREAMING_SNAKE_CASE_: Any =[10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000] SCREAMING_SNAKE_CASE_: List[Any] =[time_max_subarray(lowercase ) for input_size in input_sizes] print("""No of Inputs\t\tTime Taken""" 
) for input_size, runtime in zip(lowercase , lowercase ): print(lowercase , """\t\t""" , lowercase ) plt.plot(lowercase , lowercase ) plt.xlabel("""Number of Inputs""" ) plt.ylabel("""Time taken in seconds""" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
173
'''simple docstring''' from maths.prime_factors import prime_factors def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" if not isinstance(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer""" raise TypeError(UpperCamelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
37
0
"""SpeechT5 tokenizer: a character-level wrapper around a SentencePiece model."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class __magic_name__(PreTrainedTokenizer):
    """SpeechT5 tokenizer backed by a SentencePiece char model (``spm_char.model``).

    BUG FIX: the original declared every ``__init__`` parameter with the same
    mangled name (duplicate parameter names are a SyntaxError) and named every
    method ``__lowercase`` (each definition shadowed the previous one), so the
    class could not even be imported.  Parameter and method names are restored
    to the tokenizer API that ``PreTrainedTokenizer`` dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in
        # __setstate__ from the stored vocab_file path.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Concatenate a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # Only the trailing EOS added by build_inputs_with_special_tokens is special.
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Vocab file no longer on disk: serialize the in-memory model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
89
"""GPT-SW3 tokenizer: SentencePiece-based tokenizer with text pre-normalization."""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class lowerCAmelCase_(PreTrainedTokenizer):
    """GPT-SW3 tokenizer backed by SentencePiece.

    BUG FIX: the original declared every ``__init__`` parameter under one
    duplicated mangled name (a SyntaxError) and named every method
    ``UpperCAmelCase_`` (each shadowing the previous), so the class was
    unusable.  Names are restored to the tokenizer API that
    ``PreTrainedTokenizer`` expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts.
        # NOTE(review): the upstream set contains several *distinct* Unicode
        # whitespace characters; they appear normalized to plain spaces in
        # this copy of the source — confirm against the original model repo.
        # fmt: off
        self.whitespaces = {" ", "", "„"}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        # The SentencePiece processor is not picklable; reload it on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalize whitespace and apply NFC."""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc
                # by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode text directly with SentencePiece, bypassing the slow tokenizer path."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids directly with SentencePiece, bypassing the slow tokenizer path."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Serialize a Conversation into the ``User:``/``Bot:`` prompt format and encode it."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}"
            + f"{self.bos_token}".join(all_responses)
            + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
37
0
"""Tests for the MRA model family (tiny configs + integration checks)."""
import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    """Builds tiny MRA configs/inputs and checks each head's output shapes.

    BUG FIX: the original declared every method parameter under one duplicated
    mangled name (a SyntaxError), collapsed the tuple unpackings of
    prepare_config_and_inputs() to a single name, and named all three classes
    in this module ``lowercase`` so they shadowed each other — while the test
    class itself instantiated ``MraModelTester``, which no longer existed.
    Names are restored accordingly.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        # Larger vocab so pipeline tests have room for special tokens.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
285
"""Integration tests for XLM-RoBERTa hidden-state outputs."""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_(unittest.TestCase):
    """Check base/large XLM-R checkpoints against recorded fairseq activations.

    BUG FIX: both test methods shared one mangled name (the second definition
    shadowed the first, so only one test ever ran) and their bodies referenced
    the undefined name ``__UpperCAmelCase`` instead of the local tensors.
    Distinct method names and the real locals are restored.
    """

    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]])
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
37
0
from __future__ import annotations


def A__(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given exactly one of voltage, current or resistance
    as 0, return the missing quantity in a one-entry dict.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance is negative.
    """
    # BUG FIX: the original declared all three parameters with the same
    # mangled name (a SyntaxError) while the body used
    # voltage/current/resistance; the names the body expects are restored.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
299
"""Utilities for loading Flax (msgpack) checkpoints into PyTorch models."""
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Deserialize the Flax checkpoint at *model_file* and load it into *pt_model*.

    BUG FIX: in the original, both functions of this module were defined under
    one duplicated mangled name (the second shadowed the first) while this one
    called ``load_flax_weights_in_pytorch_model`` — a name that no longer
    existed — and both signatures used duplicated parameter names (a
    SyntaxError).  The real names are restored.
    """
    try:
        with open(model_file, "rb") as flax_state_f:
            # NOTE(review): the first argument to from_bytes was mangled in
            # this copy; flax uses it only as the target pytree template —
            # confirm against the upstream source.
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Copy the weights of a Flax state PyTree into *pt_model* (a torch Module).

    BUG FIX: the original's ``tree_map`` lambdas bound one mangled parameter
    name but referenced another in their bodies (a guaranteed NameError); the
    parameter names now match the bodies.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bfloat16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bfloat16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: Flax stores HWIO, PyTorch expects OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # dense kernel: transpose to PyTorch's (out, in) layout
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
37
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a_ : Optional[int] = logging.get_logger(__name__) a_ : Optional[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""", """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""", """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""", """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""", """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""", """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""", """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""", """self_attn.rotary_emb""": """encoder.embed_positions""", """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""", """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""", """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""", """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""", """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""", """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""", """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""", """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""", """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""", """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""", """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""", """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""", """final_layer_norm""": 
"""encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } a_ : Dict = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] ): for attribute in key.split("." ): lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ) if weight_type is not None: lowerCamelCase_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape else: lowerCamelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCamelCase_ = value elif weight_type == "weight_g": lowerCamelCase_ = value elif weight_type == "weight_v": lowerCamelCase_ = value elif weight_type == "bias": lowerCamelCase_ = value elif weight_type == "running_mean": lowerCamelCase_ = value elif weight_type == "running_var": lowerCamelCase_ = value elif weight_type == "num_batches_tracked": lowerCamelCase_ = value elif weight_type == "inv_freq": lowerCamelCase_ = value else: lowerCamelCase_ = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ): lowerCamelCase_ = [] lowerCamelCase_ = fairseq_model.state_dict() lowerCamelCase_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): lowerCamelCase_ = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == "group" , ) lowerCamelCase_ = True else: for key, mapped_key in MAPPING.items(): lowerCamelCase_ = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCamelCase_ = True if "*" in mapped_key: lowerCamelCase_ = name.split(UpperCAmelCase_ )[0].split("." )[-2] lowerCamelCase_ = mapped_key.replace("*" , UpperCAmelCase_ ) if "pos_bias_u" in name: lowerCamelCase_ = None elif "pos_bias_v" in name: lowerCamelCase_ = None elif "weight_g" in name: lowerCamelCase_ = """weight_g""" elif "weight_v" in name: lowerCamelCase_ = """weight_v""" elif "bias" in name: lowerCamelCase_ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCamelCase_ = """weight""" elif "running_mean" in name: lowerCamelCase_ = """running_mean""" elif "inv_freq" in name: lowerCamelCase_ = """inv_freq""" elif "running_var" in name: lowerCamelCase_ = """running_var""" elif "num_batches_tracked" in name: lowerCamelCase_ = """num_batches_tracked""" else: lowerCamelCase_ = None set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) continue if not is_used: unused_weights.append(UpperCAmelCase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , 
UpperCAmelCase_ : Tuple ): lowerCamelCase_ = full_name.split("conv_layers." )[-1] lowerCamelCase_ = name.split("." ) lowerCamelCase_ = int(items[0] ) lowerCamelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowerCamelCase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowerCamelCase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) lowerCamelCase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) lowerCamelCase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCAmelCase_ ) @torch.no_grad() def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=None , 
UpperCAmelCase_ : str=None , UpperCAmelCase_ : Tuple=True ): if config_path is not None: lowerCamelCase_ = WavaVecaConformerConfig.from_pretrained(UpperCAmelCase_ , hidden_act="swish" ) else: lowerCamelCase_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: lowerCamelCase_ = """rotary""" if is_finetuned: if dict_path: lowerCamelCase_ = Dictionary.load(UpperCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCamelCase_ = target_dict.pad_index lowerCamelCase_ = target_dict.bos_index lowerCamelCase_ = target_dict.eos_index lowerCamelCase_ = len(target_dict.symbols ) lowerCamelCase_ = os.path.join(UpperCAmelCase_ , "vocab.json" ) if not os.path.isdir(UpperCAmelCase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase_ ) ) return os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) lowerCamelCase_ = target_dict.indices # fairseq has the <pad> and <s> switched lowerCamelCase_ = 0 lowerCamelCase_ = 1 with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = WavaVecaCTCTokenizer( UpperCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCAmelCase_ , ) lowerCamelCase_ = True if config.feat_extract_norm == """layer""" else False lowerCamelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , ) lowerCamelCase_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ ) processor.save_pretrained(UpperCAmelCase_ ) lowerCamelCase_ = WavaVecaConformerForCTC(UpperCAmelCase_ ) else: lowerCamelCase_ = WavaVecaConformerForPreTraining(UpperCAmelCase_ ) if is_finetuned: lowerCamelCase_ = 
fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: lowerCamelCase_ = argparse.Namespace(task="audio_pretraining" ) lowerCamelCase_ = fairseq.tasks.setup_task(UpperCAmelCase_ ) lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase_ ) lowerCamelCase_ = model[0].eval() recursively_load_weights(UpperCAmelCase_ , UpperCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) a_ : Dict = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
55
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str: lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Optional[int] = is_training lowerCAmelCase__ : Dict = use_auxiliary_loss lowerCAmelCase__ : Union[str, Any] = num_queries lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : List[str] = min_size lowerCAmelCase__ : int = max_size lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[Any] = mask_feature_size def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase ) lowerCAmelCase__ : Any = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5 ).float() lowerCAmelCase__ : Optional[int] = 
(torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long() lowerCAmelCase__ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase_ ( self ) -> Dict: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]: with torch.no_grad(): lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of 
the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : Dict = model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model( pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class 
lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowercase : int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Dict = False __lowercase : Tuple = False __lowercase : List[Any] = False def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : str = MaskFormerModelTester(self ) lowerCAmelCase__ : List[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def UpperCAmelCase_ ( self ) -> str: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def UpperCAmelCase_ ( self ) -> Any: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass 
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : str = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Dict = [*signature.parameters.keys()] lowerCAmelCase__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2 lowerCAmelCase__ : Any = { """pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(), } lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = 
model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase_ ( self ) -> int: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Dict = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Tuple = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase__ : List[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) 
self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowerCAmelCase = 1e-4 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase_ ( self ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = self.default_image_processor lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[int] = torch.tensor( 
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : List[str] = prepare_img() lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : Optional[int] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Tuple = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Union[str, Any] = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, 
-7.76_30E00, -5.12_63E00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Tuple = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) 
-> Optional[Any]: lowerCAmelCase__ : str = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : Union[str, Any] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,) lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCAmelCase__ : Any = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
37
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCAmelCase :Optional[Any] = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Optional[Any] = ['ConvNextFeatureExtractor'] _lowerCAmelCase :int = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Dict = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :List[str] = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: 
import sys _lowerCAmelCase :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
263
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = '''focalnet''' def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : int = patch_size lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : List[str] = use_conv_embed lowerCAmelCase__ : List[Any] = hidden_sizes lowerCAmelCase__ : Dict = depths lowerCAmelCase__ : List[str] = focal_levels lowerCAmelCase__ : List[str] = focal_windows lowerCAmelCase__ : Dict = hidden_act lowerCAmelCase__ : Dict = mlp_ratio lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Tuple = drop_path_rate lowerCAmelCase__ : Dict = use_layerscale lowerCAmelCase__ : Optional[Any] = layerscale_value lowerCAmelCase__ : str = use_post_layernorm lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation lowerCAmelCase__ : int = normalize_modulator lowerCAmelCase__ : Optional[Any] = 
initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : List[Any] = encoder_stride lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
37
0
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()

# Backward-compatible alias for the original module-level name.
UpperCAmelCase = process_lock


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, n=10):
    """Worker for one array element in a parallel odd-even transposition sort.

    Args:
        position: index of this element in the array (decides swap parity).
        value: the element this process holds.
        l_send / r_send: pipes used to send our value to the left/right
            neighbor (``None`` when there is no such neighbor).
        lr_cv / rr_cv: pipes used to receive the left/right neighbor's value.
        result_pipe: pipe used to report the final value back to the parent.
        n: number of swap phases; ``n == len(arr)`` guarantees the array is
            sorted (defaults to 10 for backward compatibility with the
            original 10-element driver).
    """
    global process_lock

    # After n phases over n elements the sequence is guaranteed sorted, so we
    # simply run all phases instead of detecting early termination.
    for i in range(n):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            neighbor_value = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, neighbor_value)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            neighbor_value = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, neighbor_value)

    # after all swaps are performed, send the value back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` in ascending order with one process per element.

    Spawns ``len(arr)`` worker processes wired to their neighbors with pipes,
    runs ``len(arr)`` odd-even swap phases, then collects the results.
    Returns the (mutated) input list.
    """
    # Nothing to exchange for 0 or 1 elements; spawning would be wrong (the
    # original code created two processes for a single element).
    if len(arr) <= 1:
        return arr

    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())

    # creates the processes; the first and last process only have one
    # neighbor so they are made outside of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], len(arr)),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], len(arr)),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                len(arr),
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo driver: sort the reversed list 10..1 and print before/after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


# Backward-compatible alias: the original mangled name's final binding was
# the last function defined (main).
__lowerCamelCase = main

if __name__ == "__main__":
    main()
252
"""Convert an HF Diffusers saved pipeline to a Stable Diffusion checkpoint.

Key-renaming script: UNet, VAE and text-encoder state dicts are remapped from
the HF Diffusers layout to the original Stable Diffusion (CompVis) layout and
written out as a single ``.ckpt``/``.safetensors`` file.
"""

import argparse
import os.path as osp
import re

import torch

# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to Stable Diffusion UNet keys.

    Builds an hf-key -> sd-key mapping, then returns a new dict of the
    original tensors under the renamed keys.
    """
    # buyer beware: this is a *brittle* function, and correct output requires
    # that all of the UNet conversions above are correct.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        # Guard so partial/pruned state dicts don't introduce phantom keys
        # (and a KeyError below); for a complete UNet this is a no-op.
        if hf_name in mapping:
            mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    """Convert an HF linear weight to an SD conv weight by adding 1x1 dims."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to Stable Diffusion VAE keys.

    Attention q/k/v/proj_out weights additionally get reshaped to the SD
    convolutional layout via ``reshape_weight_for_sd``.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert an SD 2.x (OpenCLIP) text-encoder state dict to SD layout.

    Separate ``q_proj``/``k_proj``/``v_proj`` weights and biases are gathered
    per layer and concatenated into the fused ``in_proj_weight`` /
    ``in_proj_bias`` tensors expected by the original checkpoint format;
    every other key is renamed through ``textenc_pattern``.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # the character 13-from-the-end is the q/k/v discriminator
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """SD 1.x text encoder (CLIP ViT-L) needs no key renaming."""
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # safetensors is only needed when actually converting; importing it lazily
    # keeps the conversion helpers above importable without the dependency.
    from safetensors.torch import load_file, save_file

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
37
0
UpperCAmelCase__ = "0.18.2" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, 
KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, 
StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, 
StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
339
"""BLEURT metric (wrapper around the google-research/bleurt scorer)."""

import os

import datasets
from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    """``datasets.Metric`` that scores predictions with a BLEURT checkpoint."""

    def _info(self):
        # Declares the metric's features/metadata for the datasets framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        # note the argument order: the bleurt scorer calls predictions "candidates"
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}


# Backward-compatible alias for the original (mangled) class name.
lowerCAmelCase_ = BLEURT
37
0
from pickle import UnpicklingError

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict

from ..utils import logging


logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Deserialize a msgpack Flax checkpoint at `model_file` and load it into `pt_model`.

    Returns the PyTorch model with the Flax weights loaded. Raises EnvironmentError when
    the file cannot be parsed, with a dedicated hint for un-pulled git-lfs pointer files.
    """
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                # A text file starting with "version" is a git-lfs pointer, not a checkpoint.
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Copy the weights from a deserialized Flax state tree into `pt_model`'s state dict.

    Handles the Flax->PyTorch naming and layout conventions (conv kernels are transposed,
    dense kernels are transposed, `scale` becomes `weight`, `_N` suffixes become `.N`),
    logs unexpected/missing keys, and returns the updated PyTorch model.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bfaa):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # Conv kernel: Flax stores HWIO, PyTorch expects OIHW.
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # Dense kernel: transpose to PyTorch's (out, in) layout.
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            # Map Flax's numeric "_N" module suffixes to PyTorch's ".N" list indices.
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
6
"""Conversational pipeline: a `Conversation` state object plus the pipeline that runs
multi-turn dialogue models (e.g. DialoGPT, BlenderBot) over it."""

import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class lowerCAmelCase_:
    """Holds the state of a conversation: past user inputs, generated responses,
    and the not-yet-processed new user input."""

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        # A fresh UUID identifies the conversation when the caller did not supply one.
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        # Equal when same UUID, or when the full dialogue state matches.
        if not isinstance(other, lowerCAmelCase_):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        """Queue `text` as the next user input; refuse (or overwrite) if one is pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"."
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs over the whole dialogue, including the pending input."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Pipeline that consumes `Conversation` objects and appends model responses to them."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation needs a pad token; fall back to EOS when the tokenizer has none.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        # Pipeline hook: split caller kwargs into per-stage parameter dicts.
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        # Unwrap a single-conversation result so callers get the Conversation back directly.
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, lowerCAmelCase_):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        # Trim history so at least `minimum_tokens` remain available for the response.
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        # Encoder-decoder models emit only the response (skip BOS); decoder-only models
        # echo the prompt, so the response starts after the input tokens.
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        # Fallback encoding: concatenate each turn, separated by EOS when available.
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        # Keep only the most recent tokens if the history exceeds the model's window.
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
37
0
"""BLEURT metric for the `datasets` library (learnt evaluation metric for NLG)."""

import os

import datasets
from bleurt import score  # From: git+https://github.com/google-research/bleurt.git


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

# Checkpoint name -> download URL for every BLEURT model this metric supports.
CHECKPOINT_URLS = {
    "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
    "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
    "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
    "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
    "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
    "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
    "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
    "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
    "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
    "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase_(datasets.Metric):
    """BLEURT metric: scores candidate sentences against references with a learnt BERT-based model."""

    def _info(self):
        # `datasets.Metric` hook: static metadata describing the metric and its inputs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # `datasets.Metric` hook: resolve the configured checkpoint name
        # (case-insensitively) and build the BLEURT scorer once per metric instance.
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        # `datasets.Metric` hook: BLEURT scores candidates against references; higher is better.
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
14
"""Zero-shot image classification pipeline: scores free-form candidate text labels
against an image with a CLIP-style dual-encoder model."""

from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCAmelCase_(Pipeline):
    """Pipeline that ranks caller-supplied candidate labels for an image."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        # Restrict to models that emit image/text similarity logits (CLIP-style).
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Pipeline hook: both tunables only affect preprocessing.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # Each candidate label is rendered through the hypothesis template before encoding.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            # A single candidate label yields a scalar; normalize to a list.
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # Highest-probability label first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
37
0
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Returns (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC, retrying with smaller batch sizes on OOM."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    """Parse CLI arguments and launch training with the example hyper-parameters."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
118
"""SageMaker integration tests: launch distributed HuggingFace training jobs
(pytorch smdistributed / DDP, tensorflow) and assert on their reported metrics."""

import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class lowerCAmelCase_(unittest.TestCase):
    """One parameterized test case per (framework, script) combination above."""

    def setUp(self):
        # unittest hook: stage the example training script into the test path.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        # `env` is injected by the `sm_env` pytest fixture.
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace SageMaker estimator for `instance_count` instances."""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings: smdistributed data-parallel unless the script drives DDP itself.
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's metric timeseries next to the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
37
0
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    """Pairwise cosine similarity between image embeddings and concept embeddings.

    Both inputs are L2-normalized row-wise, so the matrix product of one with
    the transpose of the other yields cosine similarities.

    NOTE(review): the mangled original declared two parameters with the same
    placeholder name (a SyntaxError) and referenced unresolved locals; this is
    the coherent reconstruction implied by the surviving call sites.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


# Backward-compatible alias: the mangled module exposed this function under
# the placeholder name.
SCREAMING_SNAKE_CASE_ = cosine_distance


class _SCREAMING_SNAKE_CASE(PreTrainedModel):
    """CLIP-based safety checker that flags images matching NSFW concepts.

    Images are embedded with a CLIP vision tower, projected into the joint
    space, and compared (cosine similarity) against fixed concept embeddings
    loaded from the checkpoint.  A per-concept threshold decides whether an
    image is flagged.

    NOTE(review): the original base class was the unresolved placeholder `_a`;
    `PreTrainedModel` (imported at the top of this module) is the only
    plausible base.  The two inference methods were both mangled to `_A`
    (the first shadowed by the second); they are restored as `forward` /
    `forward_onnx`, the names the nn.Module call protocol and export path
    require.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        # Frozen concept embeddings and per-concept thresholds; the sizes
        # (17 general concepts, 3 "special care" concepts) come from the
        # shapes hard-coded in the original parameters.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Return (images, has_nsfw_concepts) with per-image boolean flags."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant
        # overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # Hitting a special-care concept tightens all thresholds.
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """Vectorized, export-friendly variant of `forward` (no Python loops)."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
38
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): this test module has been machine-mangled.  The base class `_a`
# is unresolved (presumably TokenizerTesterMixin, which is imported but unused),
# the three class attributes all share the placeholder name `snake_case__`
# (only the last assignment survives), every method is named `_A` (each def
# shadows the previous), and locals were renamed to `UpperCamelCase` while
# later references keep the original names (`vocab_tokens`, `tokenizer`,
# `original_len`, ...).  Code is left byte-identical; only comments were added.
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
    # Presumably tokenizer_class plus two feature flags — TODO confirm against
    # TokenizerTesterMixin; the shared name means only `False` survives.
    snake_case__ : Optional[Any] = TransfoXLTokenizer
    snake_case__ : List[Any] = False
    snake_case__ : Tuple = False

    def _A ( self : str ):
        # setUp: write a tiny whitespace-delimited vocabulary into the temp dir.
        super().setUp()
        UpperCamelCase :Dict = [
            """<unk>""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """unwanted""",
            """wa""",
            """un""",
            """running""",
            """,""",
            """low""",
            """l""",
        ]
        UpperCamelCase :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        # NOTE(review): `self.vocab_file` / `vocab_tokens` are read here but the
        # assignments above were mangled to the throwaway name `UpperCamelCase`.
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    def _A ( self : List[str] , **__lowerCamelCase : Any ):
        # Tokenizer factory used by the mixin; forces lower_case=True
        # (the flag assignment was mangled to `UpperCamelCase`).
        UpperCamelCase :Any = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )

    def _A ( self : Any , __lowerCamelCase : int ):
        # Provide an (input, expected output) pair for round-trip tests.
        UpperCamelCase :List[Any] = """<unk> UNwanted , running"""
        UpperCamelCase :int = """<unk> unwanted, running"""
        return input_text, output_text

    def _A ( self : Tuple ):
        # Lower-cased tokenization against the tiny vocab plus id conversion.
        UpperCamelCase :List[str] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCamelCase )
        UpperCamelCase :Any = tokenizer.tokenize("""<unk> UNwanted , running""" )
        self.assertListEqual(__lowerCamelCase , ["""<unk>""", """unwanted""", """,""", """running"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [0, 4, 8, 7] )

    def _A ( self : Optional[Any] ):
        # Whitespace/punctuation splitting with lower-casing enabled.
        UpperCamelCase :List[Any] = TransfoXLTokenizer(lower_case=__lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )

    def _A ( self : Union[str, Any] ):
        # Same input with lower-casing disabled: case must be preserved.
        UpperCamelCase :int = TransfoXLTokenizer(lower_case=__lowerCamelCase )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def _A ( self : Tuple ):
        # Moses-style number/punctuation splitting (@-@, @,@, @.@ markers) and
        # detokenization round-trip.
        UpperCamelCase :Any = TransfoXLTokenizer(lower_case=__lowerCamelCase )
        UpperCamelCase :Optional[int] = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
        UpperCamelCase :Optional[int] = [
            """Hello""",
            """(""",
            """bracket""",
            """)""",
            """and""",
            """side""",
            """@-@""",
            """scrolled""",
            """[""",
            """and""",
            """]""",
            """Henry""",
            """'s""",
            """$""",
            """5""",
            """@,@""",
            """000""",
            """with""",
            """3""",
            """@.@""",
            """34""",
            """m""",
            """.""",
            """What""",
            """'s""",
            """up""",
            """!""",
            """?""",
        ]
        self.assertListEqual(tokenizer.tokenize(__lowerCamelCase ) , __lowerCamelCase )
        self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCamelCase ) , __lowerCamelCase )

    def _A ( self : List[Any] ):
        # move_added_token: the token keeps a single id slot (no duplicate) and
        # encodes/decodes at its new position.
        UpperCamelCase :Any = self.get_tokenizer()
        UpperCamelCase :List[str] = len(__lowerCamelCase )
        tokenizer.add_tokens(["""new1""", """new2"""] )
        tokenizer.move_added_token("""new1""" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(__lowerCamelCase ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , """new1""" )
38
1
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Reverse the byte order of a 32-character bit string (4 groups of 8).

    Raises:
        ValueError: if the input is not exactly 32 characters long.
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of the low 32 bits of ``i``.

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Expand ``message`` to an MD5-padded bit string (multiple of 512 bits)."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    # Original bit length, recorded before padding (MD5 length field).
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block as a list of sixteen 32-bit little-endian words.

    Raises:
        ValueError: if the input length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits.

    Raises:
        ValueError: if ``i`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two integers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the 32-bit integer ``i`` left by ``shift`` bits.

    Raises:
        ValueError: if ``i`` or ``shift`` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as 32 lowercase hex-digit bytes.

    The mangled original renamed every helper to the same placeholder name
    while the call sites kept the original identifiers (``to_little_endian``,
    ``not_aa``/``sum_aa``/``left_rotate_aa``, ...), so the module raised
    NameError at runtime; consistent names are restored here.

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    >>> md5_me(b"The quick brown fox jumps over the lazy dog")
    b'9e107d9d372bb6826bd81d3542a419d6'
    """
    bit_string = preprocess(message)

    # K constants of RFC 1321, derived from the sine function.
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


# Backward-compatible alias: the mangled module left this placeholder name
# bound to the final (digest) function.
SCREAMING_SNAKE_CASE_ = md5_me


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


# NOTE(review): this conversion script has been machine-mangled.  Several defs
# declare the same placeholder parameter name `__magic_name__` multiple times
# (a SyntaxError), module-level names all became `UpperCAmelCase_`, and locals
# became `UpperCamelCase` while their later uses keep the original identifiers
# (`logger`, `MAPPING`, `hf_pointer`, `unused_weights`, ...).  Code is left
# byte-identical; only comments were added.

logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)

# fairseq/unilm state-dict key fragment -> HF WavLM module path ("*" = layer index).
UpperCAmelCase_ : Optional[Any] = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
    '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
    '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''ctc_proj''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Keys that live at the top level of the HF model (not under a prefix).
UpperCAmelCase_ : int = [
    '''ctc_proj''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]


# set_recursively: walk `key` dot-by-dot into the HF model, check the shape of
# the target tensor against `value`, and assign it to the right slot
# (weight / weight_g / weight_v / bias / data).
# NOTE(review): all five parameters were mangled to `__magic_name__`.
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> Dict:
    """simple docstring"""
    for attribute in key.split(""".""" ):
        UpperCamelCase :Dict = getattr(__magic_name__ , __magic_name__ )
    if weight_type is not None:
        UpperCamelCase :Optional[int] = getattr(__magic_name__ , __magic_name__ ).shape
    else:
        UpperCamelCase :Optional[int] = hf_pointer.shape
    # Guard against transposed / mis-mapped tensors before assignment.
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        UpperCamelCase :str = value
    elif weight_type == "weight_g":
        UpperCamelCase :int = value
    elif weight_type == "weight_v":
        UpperCamelCase :int = value
    elif weight_type == "bias":
        UpperCamelCase :List[Any] = value
    else:
        UpperCamelCase :Any = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )


# recursively_load_weights: iterate the fairseq state dict, route conv-layer
# tensors to load_conv_layer and everything else through MAPPING, warning
# about any weights that could not be matched.
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : List[str] ) -> Optional[Any]:
    """simple docstring"""
    UpperCamelCase :Union[str, Any] = []
    UpperCamelCase :Dict = fairseq_model.state_dict()
    UpperCamelCase :int = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        UpperCamelCase :str = False
        if "conv_layers" in name:
            load_conv_layer(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == """group""" , )
            UpperCamelCase :Union[str, Any] = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    UpperCamelCase :Optional[int] = True
                    if "*" in mapped_key:
                        # Extract the numeric layer index from the fairseq key.
                        UpperCamelCase :List[Any] = name.split(__magic_name__ )[0].split(""".""" )[-2]
                        UpperCamelCase :int = mapped_key.replace("""*""" , __magic_name__ )
                    if "weight_g" in name:
                        UpperCamelCase :List[Any] = """weight_g"""
                    elif "weight_v" in name:
                        UpperCamelCase :List[Any] = """weight_v"""
                    elif "bias" in name and "relative_attention_bias" not in name:
                        UpperCamelCase :Any = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        UpperCamelCase :List[str] = """weight"""
                    else:
                        UpperCamelCase :Optional[int] = None
                    set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
                continue
        if not is_used:
            unused_weights.append(__magic_name__ )
    logger.warning(f"""Unused weights: {unused_weights}""" )


# load_conv_layer: place one feature-extractor conv tensor, dispatching on the
# layer/type ids encoded in the fairseq key ("conv_layers.<layer>.<type>...").
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any , __magic_name__ : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : List[str] ) -> Dict:
    """simple docstring"""
    UpperCamelCase :Dict = full_name.split("""conv_layers.""" )[-1]
    UpperCamelCase :int = name.split(""".""" )
    UpperCamelCase :str = int(items[0] )
    UpperCamelCase :str = int(items[1] )
    if type_id == 0:
        # type 0: the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            UpperCamelCase :Tuple = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            UpperCamelCase :Dict = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the layer norm (only present on layer 0 when group norm is used).
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            UpperCamelCase :Tuple = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            UpperCamelCase :Union[str, Any] = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(__magic_name__ )


# convert_wavlm_checkpoint: load the original unilm checkpoint, build the HF
# WavLMModel (optionally from an explicit config), copy every weight over, and
# save the result to the dump folder.
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : str=None ) -> int:
    """simple docstring"""
    # load the pre-trained checkpoints
    UpperCamelCase :List[Any] = torch.load(__magic_name__ )
    UpperCamelCase :List[Any] = WavLMConfigOrig(checkpoint["""cfg"""] )
    UpperCamelCase :int = WavLMOrig(__magic_name__ )
    model.load_state_dict(checkpoint["""model"""] )
    model.eval()
    if config_path is not None:
        UpperCamelCase :List[Any] = WavLMConfig.from_pretrained(__magic_name__ )
    else:
        UpperCamelCase :Any = WavLMConfig()
    UpperCamelCase :Dict = WavLMModel(__magic_name__ )
    recursively_load_weights(__magic_name__ , __magic_name__ )
    hf_wavlm.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    UpperCAmelCase_ : Optional[int] = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
38
1
def SCREAMING_SNAKE_CASE_(__magic_name__: int) -> bool:
    """Return True iff the argument is a perfect cube (k**3 for some integer k).

    The previous implementation compared floating-point values
    (``(n ** (1/3)) ** 3 == n``), which fails even for exact cubes: in IEEE-754
    arithmetic ``27 ** (1/3) == 3.0000000000000004``, so ``perfect_cube(27)``
    returned False.  An exact integer cube root is computed here by binary
    search instead, which also generalizes to negative numbers and to integers
    too large for float precision.

    >>> SCREAMING_SNAKE_CASE_(27)
    True
    >>> SCREAMING_SNAKE_CASE_(4)
    False
    >>> SCREAMING_SNAKE_CASE_(-8)
    True
    """
    n = __magic_name__
    if n < 0:
        # (-k)**3 == -(k**3), so n is a cube iff -n is.
        n = -n
    low, high = 0, max(n, 1)
    while low <= high:
        mid = (low + high) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            low = mid + 1
        else:
            high = mid - 1
    return False


# Backward-compatible alias: the original __main__ block called this name,
# which the mangling had left undefined.
perfect_cube = SCREAMING_SNAKE_CASE_


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
38
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends


if is_bsa_available():
    import bsa
    from bsa import BeautifulSoup


UpperCAmelCase_ : Any = logging.get_logger(__name__)


# NOTE(review): this feature extractor has been machine-mangled.  The base
# class `_a` is unresolved (presumably FeatureExtractionMixin, imported above),
# helper methods were renamed to `_A` (each def shadows the previous) while
# call sites keep the original names (`self.xpath_soup`,
# `self.get_three_from_single`, `self.construct_xpath`), and locals became
# `UpperCamelCase` while later uses keep the original names.  Code is left
# byte-identical; only comments were added.
class _SCREAMING_SNAKE_CASE ( _a ):
    def __init__( self : Optional[int] , **__lowerCamelCase : Optional[int] ):
        # BeautifulSoup ("bs4") is a hard requirement for HTML parsing.
        requires_backends(self , ["""bs4"""] )
        super().__init__(**__lowerCamelCase )

    def _A ( self : List[str] , __lowerCamelCase : Any ):
        # xpath_soup: climb from a node to the document root, collecting each
        # ancestor's tag name and its 1-based index among same-named siblings
        # (0 means "only child of that tag name", i.e. no subscript needed).
        UpperCamelCase :Optional[int] = []
        UpperCamelCase :List[str] = []
        UpperCamelCase :Union[str, Any] = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            UpperCamelCase :Optional[Any] = parent.find_all(child.name , recursive=__lowerCamelCase )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) )
            UpperCamelCase :Any = parent
        # Collected bottom-up; reverse to get root-to-leaf order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def _A ( self : Any , __lowerCamelCase : Tuple ):
        # get_three_from_single: parse one HTML string and return, per text
        # node, the unescaped text plus the tag-name and subscript sequences
        # that locate it in the tree.
        UpperCamelCase :Any = BeautifulSoup(__lowerCamelCase , """html.parser""" )
        UpperCamelCase :Union[str, Any] = []
        UpperCamelCase :Tuple = []
        UpperCamelCase :Tuple = []
        for element in html_code.descendants:
            if type(__lowerCamelCase ) == bsa.element.NavigableString:
                if type(element.parent ) != bsa.element.Tag:
                    continue
                UpperCamelCase :Any = html.unescape(__lowerCamelCase ).strip()
                # Skip whitespace-only text nodes.
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(__lowerCamelCase )
                UpperCamelCase , UpperCamelCase :Optional[Any] = self.xpath_soup(__lowerCamelCase )
                stringaxtag_seq.append(__lowerCamelCase )
                stringaxsubs_seq.append(__lowerCamelCase )
        if len(__lowerCamelCase ) != len(__lowerCamelCase ):
            raise ValueError("""Number of doc strings and xtags does not correspond""" )
        if len(__lowerCamelCase ) != len(__lowerCamelCase ):
            raise ValueError("""Number of doc strings and xsubs does not correspond""" )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
        # construct_xpath: join tag names (and non-zero subscripts) into an
        # XPath string like "/html/body/div[2]/p".
        UpperCamelCase :Tuple = """"""
        for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ):
            xpath += F"""/{tagname}"""
            if subs != 0:
                xpath += F"""[{subs}]"""
        return xpath

    def __call__( self : Any , __lowerCamelCase : Dict ):
        # Accept a single HTML string or a batch (list/tuple of strings) and
        # return a BatchFeature with parallel "nodes" and "xpaths" lists.
        UpperCamelCase :Any = False
        # Check that strings has a valid type
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            UpperCamelCase :List[Any] = True
        elif isinstance(__lowerCamelCase , (list, tuple) ):
            if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ):
                UpperCamelCase :Any = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must of type `str`, `List[str]` (batch of examples), """
                F"""but is of type {type(__lowerCamelCase )}.""" )
        UpperCamelCase :str = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) )
        if not is_batched:
            # Normalize the single-string case to a batch of one.
            UpperCamelCase :Any = [html_strings]
        # Get nodes + xpaths
        UpperCamelCase :Union[str, Any] = []
        UpperCamelCase :str = []
        for html_string in html_strings:
            UpperCamelCase , UpperCamelCase , UpperCamelCase :int = self.get_three_from_single(__lowerCamelCase )
            nodes.append(__lowerCamelCase )
            UpperCamelCase :int = []
            for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
                UpperCamelCase :str = self.construct_xpath(__lowerCamelCase , __lowerCamelCase )
                xpath_strings.append(__lowerCamelCase )
            xpaths.append(__lowerCamelCase )
        # return as Dict
        UpperCamelCase :Optional[int] = {"""nodes""": nodes, """xpaths""": xpaths}
        UpperCamelCase :Any = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
        return encoded_inputs
38
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


# NOTE(review): this test module has been machine-mangled.  All methods and
# properties are named `_A` (each def shadows the previous), locals were
# renamed to `UpperCamelCase` while later uses keep the original identifiers
# (`batch_size`, `sd_pipe`, `prompt`, `image`, ...), and arguments such as
# devices and booleans were replaced by the placeholder `__lowerCamelCase`.
# Code is left byte-identical; only comments were added.

enable_full_determinism()


class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def _A ( self : Union[str, Any] ):
        # tearDown: clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _A ( self : Dict ):
        # dummy_image: a deterministic 1x3x32x32 random tensor.
        UpperCamelCase :Tuple = 1
        UpperCamelCase :Optional[Any] = 3
        UpperCamelCase :Union[str, Any] = (32, 32)
        UpperCamelCase :List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
        return image

    @property
    def _A ( self : Optional[Any] ):
        # dummy_cond_unet_upscale: a tiny conditional UNet for the upscaler.
        torch.manual_seed(0 )
        UpperCamelCase :str = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model

    @property
    def _A ( self : Optional[Any] ):
        # dummy_vae: a tiny KL autoencoder.
        torch.manual_seed(0 )
        UpperCamelCase :Dict = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model

    @property
    def _A ( self : str ):
        # dummy_text_encoder: a tiny CLIP text model.
        torch.manual_seed(0 )
        UpperCamelCase :Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
        return CLIPTextModel(__lowerCamelCase )

    def _A ( self : Union[str, Any] ):
        # Smoke test: run the upscale pipeline on a 64x64 image, check output
        # shape (4x upscaling) and a reference slice; the tuple-return path
        # must match the dict-return path.
        UpperCamelCase :List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase :Optional[Any] = self.dummy_cond_unet_upscale
        UpperCamelCase :Optional[Any] = DDPMScheduler()
        UpperCamelCase :Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
        UpperCamelCase :List[str] = self.dummy_vae
        UpperCamelCase :Optional[Any] = self.dummy_text_encoder
        UpperCamelCase :Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        UpperCamelCase :List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase :int = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCamelCase :List[Any] = StableDiffusionUpscalePipeline(
            unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
        UpperCamelCase :Any = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        UpperCamelCase :Optional[Any] = """A painting of a squirrel eating a burger"""
        UpperCamelCase :Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
        UpperCamelCase :List[str] = sd_pipe(
            [prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
        UpperCamelCase :Tuple = output.images
        UpperCamelCase :Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
        UpperCamelCase :Optional[int] = sd_pipe(
            [prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCamelCase , )[0]
        UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
        UpperCamelCase :List[str] = image_from_tuple[0, -3:, -3:, -1]
        UpperCamelCase :Dict = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        UpperCamelCase :str = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self : Union[str, Any] ):
        # Batch test: duplicated prompts and num_images_per_prompt=2 must both
        # yield two output images.
        UpperCamelCase :Dict = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase :Optional[Any] = self.dummy_cond_unet_upscale
        UpperCamelCase :Dict = DDPMScheduler()
        UpperCamelCase :int = DDIMScheduler(prediction_type="""v_prediction""" )
        UpperCamelCase :List[str] = self.dummy_vae
        UpperCamelCase :Optional[int] = self.dummy_text_encoder
        UpperCamelCase :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        UpperCamelCase :List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase :Tuple = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        UpperCamelCase :str = StableDiffusionUpscalePipeline(
            unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
        UpperCamelCase :List[str] = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = """A painting of a squirrel eating a burger"""
        UpperCamelCase :Dict = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
        UpperCamelCase :List[str] = output.images
        assert image.shape[0] == 2
        UpperCamelCase :Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
        UpperCamelCase :Dict = sd_pipe(
            [prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
        UpperCamelCase :Optional[Any] = output.images
        assert image.shape[0] == 2

    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def _A ( self : Any ):
        # fp16 test: unet and text encoder in half precision (vae stays fp32).
        UpperCamelCase :Union[str, Any] = self.dummy_cond_unet_upscale
        UpperCamelCase :Tuple = DDPMScheduler()
        UpperCamelCase :str = DDIMScheduler(prediction_type="""v_prediction""" )
        UpperCamelCase :int = self.dummy_vae
        UpperCamelCase :Union[str, Any] = self.dummy_text_encoder
        UpperCamelCase :List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        UpperCamelCase :int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase :Tuple = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        UpperCamelCase :str = unet.half()
        UpperCamelCase :Optional[int] = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        UpperCamelCase :str = StableDiffusionUpscalePipeline(
            unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
        UpperCamelCase :Tuple = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        UpperCamelCase :Optional[int] = """A painting of a squirrel eating a burger"""
        UpperCamelCase :Tuple = torch.manual_seed(0 )
        UpperCamelCase :List[Any] = sd_pipe(
            [prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type="""np""" , ).images
        UpperCamelCase :str = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)


@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def _A ( self : Any ):
        # tearDown: clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A ( self : str ):
        # Integration test against the released x4 upscaler checkpoint and a
        # reference output image.
        UpperCamelCase :Optional[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        UpperCamelCase :str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat.npy""" )
        UpperCamelCase :List[Any] = """stabilityai/stable-diffusion-x4-upscaler"""
        UpperCamelCase :List[str] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        pipe.enable_attention_slicing()
        UpperCamelCase :Optional[int] = """a cat sitting on a park bench"""
        UpperCamelCase :str = torch.manual_seed(0 )
        UpperCamelCase :List[Any] = pipe(
            prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="""np""" , )
        UpperCamelCase :int = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3

    def _A ( self : Dict ):
        # Same integration test in fp16 with a looser tolerance.
        UpperCamelCase :Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        UpperCamelCase :Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat_fp16.npy""" )
        UpperCamelCase :Any = """stabilityai/stable-diffusion-x4-upscaler"""
        UpperCamelCase :Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
            __lowerCamelCase , torch_dtype=torch.floataa , )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        pipe.enable_attention_slicing()
        UpperCamelCase :Optional[int] = """a cat sitting on a park bench"""
        UpperCamelCase :str = torch.manual_seed(0 )
        UpperCamelCase :Tuple = pipe(
            prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="""np""" , )
        UpperCamelCase :Any = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1

    def _A ( self : str ):
        # Memory test: attention slicing + sequential CPU offload must keep
        # peak VRAM below ~2.9 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCamelCase :Tuple = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        UpperCamelCase :Any = """stabilityai/stable-diffusion-x4-upscaler"""
        UpperCamelCase :Tuple = StableDiffusionUpscalePipeline.from_pretrained(
            __lowerCamelCase , torch_dtype=torch.floataa , )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCamelCase :Optional[Any] = """a cat sitting on a park bench"""
        UpperCamelCase :Any = torch.manual_seed(0 )
        UpperCamelCase :Dict = pipe(
            prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type="""np""" , )
        UpperCamelCase :List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
38
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    """Return True if ``next_ver`` may extend ``path`` at position ``curr_ind``.

    A vertex is a valid extension when an edge connects it to the previous
    vertex on the path and it has not been visited yet.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking helper; fills ``path`` in place.

    Returns True when a Hamiltonian cycle has been completed.
    """
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Find a Hamiltonian cycle in an adjacency-matrix ``graph``.

    Returns the cycle as a vertex list starting and ending at ``start_index``,
    or an empty list when no Hamiltonian cycle exists.
    """
    # path holds every vertex plus the return to the start, hence len + 1
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
38
1
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


# NOTE(review): the previous revision bound the bare string "true" to a dead
# module constant; upstream sets this env var to silence advisory warnings —
# confirm against the original accelerate script.
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (reference model, prepared DDP model, prepared dataloader) for a toy regression task."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader used by the metric tests."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": ..., "no": ...} model/dataloader pairs, accelerator) for the MRPC test."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    """Run ``model`` over ``dataloader`` and gather (logits, targets) across processes."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check gather_for_metrics returns exactly ``num_samples`` predictions (no duplicates)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Check distributed metric computation matches the single-process baseline."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
38
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds a tiny FlaubertConfig plus random inputs and runs per-head checks.

    The class must be named ``FlaubertModelTester``: ``FlaubertModelTest.setUp``
    instantiates it by that name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
38
1
import socket


def main():
    """Connect to a local file server and save the received stream to disk.

    Fixes vs. previous revision: the connection objects were assigned to dead
    obfuscated names (``sock``/``host``/``port``/``data`` were never bound) and
    the ``__main__`` guard called an undefined ``main``.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312  # must match the port the companion file server listens on

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                # empty recv => server closed the connection, transfer done
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
38
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class _SCREAMING_SNAKE_CASE(PipelineTool):
    """Tool that transcribes audio into text using Whisper.

    Fixes vs. previous revision: every class attribute was assigned to the same
    name (clobbering each other), and all three hook methods were named ``_A``;
    the PipelineTool API requires the attribute/method names below.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        # Convert raw audio into the input features Whisper expects.
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        # skip_special_tokens drops Whisper's task/language control tokens.
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
38
1
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without a conflict
    in its row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the first empty cell (row, column) in row-major order, or None
    when the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the solved grid, or None when no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Backtrack: undo the tentative placement.
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid, one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
38
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class _SCREAMING_SNAKE_CASE(TaskTemplate):
    """Task template for `automatic-speech-recognition` datasets.

    Fixes vs. previous revision: all class attributes were assigned to the same
    name (clobbering each other) and the Audio isinstance check compared against
    the method argument instead of the ``Audio`` feature type.
    """

    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose audio schema matches ``features``."""
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # Frozen dataclass: write through __dict__ to replace the instance schema.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
38
1
# Lazy-import machinery for the DistilBERT model family: the heavy framework
# backends (PyTorch / TensorFlow / Flax) are only imported on first attribute
# access, and optional backends that are not installed are simply omitted.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure: submodule name -> public names it provides.
# Backend-specific entries are appended below only when the backend is present.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are resolved
    # lazily through _LazyModule below.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
38
# Public API for the Shap-E pipeline package. `torch` and `transformers` are
# optional dependencies: when either is missing, only a dummy `ShapEPipeline`
# placeholder is exposed so importing this package still succeeds.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Raise (and fall through to the dummy objects) unless both optional
    # backends are installed.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder class that raises a helpful error when actually used.
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    # NOTE(review): "ShapEImgaImgPipeline" looks like a mangled
    # "ShapEImg2ImgPipeline" — confirm the name actually exported by
    # pipeline_shap_e_img2img before relying on it.
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
38
1
"""Pre-train a ViTMAE model with masked auto-encoding on an image dataset."""

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    ViTImageProcessor,
    ViTMAEConfig,
    ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect explicit train/validation folders into the `data_files`
        # mapping expected by `load_dataset`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments extended with a base LR that is scaled by the total batch size."""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    """Stack per-example pixel tensors into a single batch tensor."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    # Parse CLI args (or a single .json file) into the three argument dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    # (original text had `training_args.fpaa`, a mangled `fp16` — restored here)
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config to the MAE-specific knobs
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    # Pick the image column: explicit flag first, then common defaults.
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE train-time augmentations to every image in the batch."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate: absolute_lr = base_lr * total_batch_size / 256
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs).
    main()


if __name__ == "__main__":
    main()
38
"""Exact-match metric: rate at which predictions equal their references."""

import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

# NOTE: the `ignore_numbers` help text previously said "removes all
# punctuation" — a copy-paste error, fixed to "removes all numbers".
_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """Exact-match metric over string predictions/references."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": percentage} after applying the requested normalizations."""
        if regexes_to_ignore is not None:
            # Strip each ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
38
1
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for the TrajectoryTransformer model.

    Instantiating with the defaults yields a configuration similar to the
    CarlCochet/trajectory-transformer-halfcheetah-medium-v2 checkpoint.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the common config attribute names onto this model's GPT-style names.
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        # Token ids are forwarded to the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
38
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for LayoutLMv3: text arguments plus 2D layout and
    visual-patch arguments for the document-understanding backbone."""

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        # Text-side arguments are handled by the PretrainedConfig base class.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # 2D layout (bounding box) embedding arguments.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        # Visual-patch arguments.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    """ONNX export configuration for LayoutLMv3."""

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy (text, bbox, image) inputs of the effective batch/sequence size for export tracing."""
        # OCR would produce variable-length outputs, so disable it for tracing.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
38
1
# Lazy-import init for the (deprecated) TAPEX tokenizer package.
from typing import TYPE_CHECKING

from ....utils import _LazyModule


# Submodule name -> public names it provides.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    # Static type checkers see the real import.
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Install a lazy proxy so the tokenizer module is only imported on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
38
# NOTE(review): this test module appears to have passed through an automated
# identifier-obfuscation step: most local variables are named ``UpperCamelCase``,
# many call arguments were replaced with the undefined name ``__lowerCamelCase``,
# the mixin bases became ``_a`` and the class attributes ``snake_case__``.  As
# written it will not run; comments below describe the apparent intent — to be
# confirmed against the upstream diffusers test file.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

# Make torch ops deterministic so the pinned output slices below are stable.
enable_full_determinism()


# Fast, CPU-sized smoke tests for StableDiffusionXLImgaImgPipeline.
# NOTE(review): ``_a`` is undefined — presumably the two tester mixins.
class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
    # NOTE(review): all six attributes share the name ``snake_case__`` so the
    # later assignments shadow the earlier ones; originally these were
    # pipeline_class, params, required_optional_params, batch_params and the
    # two image-params sets.
    snake_case__ : Any = StableDiffusionXLImgaImgPipeline
    snake_case__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    snake_case__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
    snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def _A ( self : int ):
        # Build a tiny SDXL component set (unet, scheduler, vae, two text
        # encoders + tokenizers) small enough to run on CPU.
        torch.manual_seed(0 )
        UpperCamelCase :Any = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=__lowerCamelCase ,  # NOTE(review): undefined; likely True
            addition_embed_type="""text_time""" ,
            addition_time_embed_dim=8 ,
            transformer_layers_per_block=(1, 2) ,
            projection_class_embeddings_input_dim=80 ,
            cross_attention_dim=64 ,
        )
        UpperCamelCase :Tuple = EulerDiscreteScheduler(
            beta_start=0.00085 ,
            beta_end=0.012 ,
            steps_offset=1 ,
            beta_schedule="""scaled_linear""" ,
            timestep_spacing="""leading""" ,
        )
        torch.manual_seed(0 )
        UpperCamelCase :Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0 )
        UpperCamelCase :Optional[int] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act="""gelu""" ,
            projection_dim=32 ,
        )
        UpperCamelCase :Any = CLIPTextModel(__lowerCamelCase )
        UpperCamelCase :List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
        UpperCamelCase :List[Any] = CLIPTextModelWithProjection(__lowerCamelCase )
        UpperCamelCase :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
        # NOTE(review): the dict values below (unet, scheduler, ...) are the
        # original local names, which the obfuscated assignments no longer bind.
        UpperCamelCase :Union[str, Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def _A ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=0 ):
        # Dummy img2img call kwargs: a 32x32 input image scaled into [0, 1]
        # plus a device-appropriate seeded generator.
        # NOTE(review): the two parameters shadow each other; originally
        # (device, seed).
        UpperCamelCase :Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
        UpperCamelCase :List[str] = image / 2 + 0.5
        if str(__lowerCamelCase ).startswith("""mps""" ):
            # MPS does not support device-local generators.
            UpperCamelCase :Any = torch.manual_seed(__lowerCamelCase )
        else:
            UpperCamelCase :List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        UpperCamelCase :str = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs

    def _A ( self : str ):
        # End-to-end CPU run; the output corner slice is pinned to known values.
        UpperCamelCase :List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase :Optional[Any] = self.get_dummy_components()
        UpperCamelCase :List[Any] = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
        UpperCamelCase :Any = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = sd_pipe(**__lowerCamelCase ).images
        UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase :List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self : Dict ):
        # Delegates to the shared mixin test with a relaxed tolerance.
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def _A ( self : Optional[Any] ):
        # Delegates to the shared mixin test with a relaxed tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def _A ( self : Union[str, Any] ):
        # Intentionally overridden mixin test (no-op).
        pass

    def _A ( self : Optional[int] ):
        # Prompt-embeds equivalence: running with raw prompts must match
        # running with precomputed (negative_)prompt / pooled embeddings.
        UpperCamelCase :Union[str, Any] = self.get_dummy_components()
        UpperCamelCase :Dict = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
        UpperCamelCase :List[Any] = sd_pipe.to(__lowerCamelCase )
        UpperCamelCase :List[str] = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        # forward without prompt embeds
        UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :int = 3 * ["""this is a negative prompt"""]
        UpperCamelCase :Union[str, Any] = negative_prompt
        UpperCamelCase :Union[str, Any] = 3 * [inputs["""prompt"""]]
        UpperCamelCase :Dict = sd_pipe(**__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :Optional[int] = 3 * ["""this is a negative prompt"""]
        UpperCamelCase :Union[str, Any] = 3 * [inputs.pop("""prompt""" )]
        # NOTE(review): originally unpacked (prompt_embeds,
        # negative_prompt_embeds, pooled_prompt_embeds,
        # negative_pooled_prompt_embeds); the invalid ``:Union[str, Any]``
        # annotation on the tuple target (a SyntaxError) has been dropped.
        (
            (UpperCamelCase) ,
            (UpperCamelCase) ,
            (UpperCamelCase) ,
            (UpperCamelCase) ,
        ) = sd_pipe.encode_prompt(__lowerCamelCase , negative_prompt=__lowerCamelCase )
        UpperCamelCase :Dict = sd_pipe(
            **__lowerCamelCase ,
            prompt_embeds=__lowerCamelCase ,
            negative_prompt_embeds=__lowerCamelCase ,
            pooled_prompt_embeds=__lowerCamelCase ,
            negative_pooled_prompt_embeds=__lowerCamelCase ,
        )
        UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4


@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # GPU-only integration tests against the published SD-2-base checkpoint.
    # NOTE(review): this class name shadows the fast-test class above.

    def _A ( self : Tuple ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict="cpu" , __lowerCamelCase : List[Any]=torch.floataa , __lowerCamelCase : Tuple=0 ):
        # Reproducible call kwargs with fixed pre-drawn latents.
        # NOTE(review): parameters shadow each other; originally
        # (device, generator_device, dtype, seed).
        UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
        UpperCamelCase :Dict = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
        UpperCamelCase :str = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def _A ( self : Optional[Any] ):
        # Full pipeline run; the pinned slice guards against regressions.
        UpperCamelCase :Any = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        UpperCamelCase :Optional[Any] = self.get_inputs(__lowerCamelCase )
        UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase ).images
        UpperCamelCase :Dict = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        UpperCamelCase :Union[str, Any] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
38
1
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
# BUG FIX: both constants were bound to the same obfuscated name while the
# code below read the undefined names WATERMARK_MESSAGE / WATERMARK_BITS,
# raising NameError on import.  The intended names are restored.
WATERMARK_MESSAGE = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class _SCREAMING_SNAKE_CASE:
    """Applies the invisible DWT-DCT watermark used by SDXL to generated image batches."""

    def __init__(self):
        # BUG FIX: the original assigned the watermark and encoder to throwaway
        # locals, so the `self.encoder` / `self.watermark` reads below raised
        # AttributeError.
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def _A(self, __lowerCamelCase: torch.FloatTensor):
        """Watermark a batch of images in [-1, 1] of shape (N, C, H, W).

        Returns a tensor of the same shape and range; images narrower than
        256 px are returned unchanged because the encoder cannot embed into
        them.
        """
        images = __lowerCamelCase
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        # [-1, 1] float NCHW -> [0, 255] float NHWC numpy for the encoder.
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        # BUG FIX: the comprehension originally encoded the (obfuscated) batch
        # argument instead of each `image`.
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        # Back to (N, C, H, W) torch in [-1, 1].
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
38
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# BUG FIX: the logger and the archive map were both bound to the same
# obfuscated module-level name (the second shadowing the first).
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a TrajectoryTransformer model.

    BUG FIX: the original (a) inherited from the undefined name ``_a`` instead
    of ``PretrainedConfig``, (b) declared all three class attributes under the
    single name ``snake_case__`` so only the last survived, (c) gave every
    ``__init__`` parameter the same name (a SyntaxError), and (d) stored the
    values into throwaway locals instead of ``self``.  The standard
    PretrainedConfig attribute names and the parameter names implied by the
    stored values/defaults are restored; positional order and defaults are
    unchanged, so existing callers remain compatible.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
38
1
# Conversion script: CompVis/LDM VAE checkpoint (.pt / .safetensors) ->
# diffusers AutoencoderKL layout.
# NOTE(review): identifier obfuscation broke this module — every local is
# bound to ``UpperCamelCase`` and most call arguments were replaced with
# ``__magic_name__`` (both function signatures even repeat that parameter
# name, which is a SyntaxError).  The references to ``vae_state_dict``,
# ``new_checkpoint``, ``down_blocks``, ``up_blocks``, ``meta_path`` etc. are
# the original names; confirm against the upstream diffusers script.
import argparse
import io

import requests
import torch
from omegaconf import OmegaConf

from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    assign_to_checkpoint,
    conv_attn_to_linear,
    create_vae_diffusers_config,
    renew_vae_attention_paths,
    renew_vae_resnet_paths,
)


def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : Any ) -> Any:
    # Remap an LDM VAE state dict (encoder/decoder/quant convs, down/up
    # blocks, mid blocks, attentions) onto diffusers key names.
    """simple docstring"""
    UpperCamelCase :List[str] = checkpoint
    UpperCamelCase :Optional[int] = {}
    # Direct one-to-one key copies for the stem/head convolutions and norms.
    UpperCamelCase :Any = vae_state_dict["""encoder.conv_in.weight"""]
    UpperCamelCase :Union[str, Any] = vae_state_dict["""encoder.conv_in.bias"""]
    UpperCamelCase :Optional[int] = vae_state_dict["""encoder.conv_out.weight"""]
    UpperCamelCase :str = vae_state_dict["""encoder.conv_out.bias"""]
    UpperCamelCase :str = vae_state_dict["""encoder.norm_out.weight"""]
    UpperCamelCase :Optional[int] = vae_state_dict["""encoder.norm_out.bias"""]
    UpperCamelCase :Optional[int] = vae_state_dict["""decoder.conv_in.weight"""]
    UpperCamelCase :int = vae_state_dict["""decoder.conv_in.bias"""]
    UpperCamelCase :str = vae_state_dict["""decoder.conv_out.weight"""]
    UpperCamelCase :Union[str, Any] = vae_state_dict["""decoder.conv_out.bias"""]
    UpperCamelCase :Optional[Any] = vae_state_dict["""decoder.norm_out.weight"""]
    UpperCamelCase :List[Any] = vae_state_dict["""decoder.norm_out.bias"""]
    UpperCamelCase :List[str] = vae_state_dict["""quant_conv.weight"""]
    UpperCamelCase :Any = vae_state_dict["""quant_conv.bias"""]
    UpperCamelCase :int = vae_state_dict["""post_quant_conv.weight"""]
    UpperCamelCase :List[Any] = vae_state_dict["""post_quant_conv.bias"""]
    # Retrieves the keys for the encoder down blocks only
    UpperCamelCase :List[str] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
    UpperCamelCase :Optional[int] = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__magic_name__ )
    }
    # Retrieves the keys for the decoder up blocks only
    UpperCamelCase :Optional[int] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
    UpperCamelCase :Optional[Any] = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__magic_name__ )
    }
    # Encoder down blocks: move downsamplers, then rename the resnet paths.
    for i in range(__magic_name__ ):
        UpperCamelCase :Tuple = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            UpperCamelCase :Any = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            UpperCamelCase :Union[str, Any] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )
        UpperCamelCase :Any = renew_vae_resnet_paths(__magic_name__ )
        UpperCamelCase :str = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
    # Encoder mid block: two resnets plus one attention.
    UpperCamelCase :Optional[Any] = [key for key in vae_state_dict if """encoder.mid.block""" in key]
    UpperCamelCase :Dict = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        UpperCamelCase :int = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        UpperCamelCase :List[Any] = renew_vae_resnet_paths(__magic_name__ )
        UpperCamelCase :str = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
    UpperCamelCase :List[Any] = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
    UpperCamelCase :List[str] = renew_vae_attention_paths(__magic_name__ )
    UpperCamelCase :List[str] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
    assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
    # Attention projections are stored as 1x1 convs in LDM; flatten to linear.
    conv_attn_to_linear(__magic_name__ )
    # Decoder up blocks, iterated in reverse block order.
    for i in range(__magic_name__ ):
        UpperCamelCase :Optional[int] = num_up_blocks - 1 - i
        UpperCamelCase :List[Any] = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            UpperCamelCase :str = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            UpperCamelCase :int = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        UpperCamelCase :int = renew_vae_resnet_paths(__magic_name__ )
        UpperCamelCase :List[Any] = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
    # Decoder mid block: two resnets plus one attention.
    UpperCamelCase :Optional[int] = [key for key in vae_state_dict if """decoder.mid.block""" in key]
    UpperCamelCase :str = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        UpperCamelCase :Optional[Any] = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        UpperCamelCase :List[str] = renew_vae_resnet_paths(__magic_name__ )
        UpperCamelCase :int = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
    UpperCamelCase :List[Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
    UpperCamelCase :Optional[Any] = renew_vae_attention_paths(__magic_name__ )
    UpperCamelCase :Union[str, Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
    assign_to_checkpoint(__magic_name__ , __magic_name__ , __magic_name__ , additional_replacements=[meta_path] , config=__magic_name__ )
    conv_attn_to_linear(__magic_name__ )
    return new_checkpoint


def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : str , ) -> Optional[int]:
    # Load a VAE checkpoint (plain torch or safetensors), convert it with the
    # function above and save it as a diffusers AutoencoderKL directory.
    """simple docstring"""
    # Download the reference SD v1 inference config (OmegaConf YAML).
    UpperCamelCase :Tuple = requests.get(
        """ https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
    UpperCamelCase :int = io.BytesIO(r.content )
    UpperCamelCase :str = OmegaConf.load(__magic_name__ )
    UpperCamelCase :str = 512
    UpperCamelCase :List[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
    if checkpoint_path.endswith("""safetensors""" ):
        from safetensors import safe_open

        UpperCamelCase :List[str] = {}
        with safe_open(__magic_name__ , framework="""pt""" , device="""cpu""" ) as f:
            for key in f.keys():
                UpperCamelCase :List[str] = f.get_tensor(__magic_name__ )
    else:
        UpperCamelCase :Optional[int] = torch.load(__magic_name__ , map_location=__magic_name__ )["""state_dict"""]
    # Convert the VAE model.
    UpperCamelCase :Union[str, Any] = create_vae_diffusers_config(__magic_name__ , image_size=__magic_name__ )
    UpperCamelCase :Union[str, Any] = custom_convert_ldm_vae_checkpoint(__magic_name__ , __magic_name__ )
    UpperCamelCase :str = AutoencoderKL(**__magic_name__ )
    vae.load_state_dict(__magic_name__ )
    vae.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    # CLI entry point: --vae_pt_path in, --dump_path out.
    UpperCAmelCase_ : Any = argparse.ArgumentParser()
    parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    UpperCAmelCase_ : Any = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
38
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def SCREAMING_SNAKE_CASE_(__magic_name__: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a Quantum Fourier Transform circuit on n qubits.

    Applies the QFT (Hadamard plus the controlled-phase ladder on each qubit,
    followed by the qubit-reversal swaps), measures every qubit, and runs
    10000 shots on the qasm simulator.

    Args:
        __magic_name__: number of qubits — a positive exact integer, at most 10.

    Returns:
        Measurement counts of the simulated circuit.

    Raises:
        TypeError: if the argument is not numeric.
        ValueError: if it is not positive, not an exact integer, or > 10.
    """
    number_of_qubits = __magic_name__
    # BUG FIX: the original guard was `isinstance(number_of_qubits,
    # number_of_qubits)`, which itself raises TypeError ("isinstance() arg 2
    # must be a type") for any input, so the function could never run.
    if not isinstance(number_of_qubits, (int, float)):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    # Integer-valued floats (e.g. 3.0) are accepted; normalise to int for the
    # register constructors.
    number_of_qubits = int(number_of_qubits)
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then the controlled-phase
        # ladder (angles pi/2, pi/4, ...) onto the lower qubits.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order, as the QFT definition requires.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=1_0000)
    return job.result().get_counts(quantum_circuit)


# BUG FIX: the __main__ block below called the undefined name
# `quantum_fourier_transform`; bind it to the function defined above.
quantum_fourier_transform = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    print(
        f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
38
1
def stooge_sort(arr):
    """Sort ``arr`` in place with stooge sort and return it.

    BUG FIX: the original defined both functions under the single name
    ``SCREAMING_SNAKE_CASE_`` (the second definition shadowing the first)
    while the bodies and the ``__main__`` block called the undefined names
    ``stooge`` / ``stooge_sort`` — a guaranteed NameError.  The intended
    names are restored; the algorithm itself is unchanged.
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort the inclusive slice ``arr[i..h]`` in place."""
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        # Integer division replaces the original C-style (int)(...) cast.
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)


# Backward-compatible binding for the name the obfuscated module exported
# (originally the second, shadowing definition).
SCREAMING_SNAKE_CASE_ = stooge


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
38
# Tests that TFBertTokenizer matches the Python BertTokenizer and survives
# tf.function compilation and SavedModel round-trips.
# NOTE(review): identifier obfuscation broke this module — locals are bound
# to ``UpperCamelCase``, loop/call arguments were replaced with the undefined
# name ``__lowerCamelCase``, the checkpoint constants share one name, both
# classes share one name, and every test method is called ``_A`` (so unittest
# would not discover them).  Comments describe the apparent intent; confirm
# against the upstream transformers test file.
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer

# Originally TOKENIZER_CHECKPOINTS and TINY_MODEL_CHECKPOINT; the second
# assignment shadows the first here.
UpperCAmelCase_ : Optional[Any] = ['''bert-base-uncased''', '''bert-base-cased''']
UpperCAmelCase_ : List[str] = '''hf-internal-testing/tiny-bert-tf-only'''

if is_tf_available():

    class _SCREAMING_SNAKE_CASE ( tf.keras.Model ):
        # Minimal Keras model wrapping an in-graph tokenizer + tiny BERT,
        # used below to test SavedModel export (originally ModelToSave).
        def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] ):
            super().__init__()
            # NOTE(review): these three assignments originally targeted
            # self.tokenizer / self.bert (via the tiny checkpoint's config);
            # as written they bind throwaway locals.
            UpperCamelCase :Any = tokenizer
            UpperCamelCase :List[str] = AutoConfig.from_pretrained(__lowerCamelCase )
            UpperCamelCase :List[str] = TFAutoModel.from_config(__lowerCamelCase )

        def _A ( self : Tuple , __lowerCamelCase : str ):
            # Tokenize in-graph, run BERT, return the pooled output.
            UpperCamelCase :str = self.tokenizer(__lowerCamelCase )
            UpperCamelCase :Any = self.bert(**__lowerCamelCase )
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def _A ( self : Dict ):
        # Build matched lists of Python tokenizers and TF tokenizers (fast and
        # non-fast variants) plus a multilingual sentence corpus.
        super().setUp()
        UpperCamelCase :int = [
            BertTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        UpperCamelCase :Any = [TFBertTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(__lowerCamelCase , use_fast_bert_tokenizer=__lowerCamelCase )
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        UpperCamelCase :Any = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        UpperCamelCase :Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def _A ( self : Optional[int] ):
        # TF tokenizer output must match the Python tokenizer shape-for-shape
        # and id-for-id on both single and paired inputs.
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                UpperCamelCase :Any = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding="""longest""" )
                UpperCamelCase :str = tf_tokenizer(__lowerCamelCase )
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )

    @slow
    def _A ( self : Dict ):
        # Passing paired sentences as one batch must equal passing
        # text/text_pair separately.
        for tf_tokenizer in self.tf_tokenizers:
            UpperCamelCase :str = tf_tokenizer(self.paired_sentences )
            UpperCamelCase :Any = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences] ,
                text_pair=[sentence[1] for sentence in self.paired_sentences] ,
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )

    @slow
    def _A ( self : List[str] ):
        # tf.function-compiled tokenizer must produce the same output as eager.
        for tf_tokenizer in self.tf_tokenizers:
            UpperCamelCase :List[Any] = tf.function(__lowerCamelCase )
            for test_inputs in (self.test_sentences, self.paired_sentences):
                UpperCamelCase :Any = tf.constant(__lowerCamelCase )
                UpperCamelCase :List[str] = compiled_tokenizer(__lowerCamelCase )
                UpperCamelCase :Optional[Any] = tf_tokenizer(__lowerCamelCase )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def _A ( self : Tuple ):
        # SavedModel round-trip: exported model output must match the live
        # model to within a small epsilon.
        for tf_tokenizer in self.tf_tokenizers:
            UpperCamelCase :List[str] = ModelToSave(tokenizer=__lowerCamelCase )
            UpperCamelCase :Union[str, Any] = tf.convert_to_tensor(self.test_sentences )
            UpperCamelCase :Union[str, Any] = model(__lowerCamelCase )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                UpperCamelCase :List[str] = Path(__lowerCamelCase ) / """saved.model"""
                model.save(__lowerCamelCase )
                UpperCamelCase :List[Any] = tf.keras.models.load_model(__lowerCamelCase )
                UpperCamelCase :Dict = loaded_model(__lowerCamelCase )
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
38
1
# Lambert's ellipsoidal-distance formula between two (lat, lon) points.
# NOTE(review): identifier obfuscation broke this module — the three WGS84
# constants all share one module-level name, and the function body reads
# names (AXIS_A, AXIS_B, flattening, b_lata, sigma, x_numerator, ...) that
# the obfuscated assignments never bind.  Comments describe the apparent
# intent; confirm against the original TheAlgorithms/Python source.
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

# Originally: AXIS_A (WGS84 semi-major/equatorial axis, metres), AXIS_B
# (semi-minor/polar axis, metres), EQUATORIAL_RADIUS (metres).  Here the
# later assignments shadow the earlier ones.
UpperCAmelCase_ : List[Any] = 6_37_81_37.0
UpperCAmelCase_ : Dict = 6_35_67_52.31_42_45
UpperCAmelCase_ : Tuple = 6_37_81_37


def SCREAMING_SNAKE_CASE_ ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ) -> float:
    """Return Lambert's ellipsoidal distance in metres between two points
    given as (lat1, lon1, lat2, lon2) in degrees."""
    # Flattening f = (a - b) / a of the reference ellipsoid.
    UpperCamelCase :str = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    UpperCamelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
    UpperCamelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    UpperCamelCase :Any = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    UpperCamelCase :Tuple = (b_lata + b_lata) / 2
    UpperCamelCase :Any = (b_lata - b_lata) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    UpperCamelCase :Optional[int] = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2)
    UpperCamelCase :Any = cos(sigma / 2 ) ** 2
    UpperCamelCase :Union[str, Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_demonimator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    UpperCamelCase :int = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2)
    UpperCamelCase :Optional[Any] = sin(sigma / 2 ) ** 2
    UpperCamelCase :Any = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator)
    # Lambert's correction applied to the spherical arc length.
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Implements `accelerate config default`: write a minimal ClusterConfig based
# on the detected hardware (CUDA / XPU / NPU / CPU).
# NOTE(review): identifier obfuscation broke this module — locals bind to
# ``UpperCamelCase``, many arguments were replaced with ``__magic_name__``,
# and the function signatures repeat that parameter name (a SyntaxError).
# The names referenced in the bodies (path, mixed_precision, num_gpus, ...)
# are the originals; confirm against the upstream accelerate source.
from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter

# Help text for the `default` sub-command (originally `description`).
UpperCAmelCase_ : Any = '''Create a default config file for Accelerate with only a few flags set.'''


def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int]="no" , __magic_name__ : str = default_json_config_file , __magic_name__ : bool = False ) -> str:
    # Originally write_basic_config(mixed_precision, save_location, use_xpu).
    # Returns the written path, or False if a config already exists there.
    """simple docstring"""
    UpperCamelCase :Any = Path(__magic_name__ )
    path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ )
    if path.exists():
        # Never clobber an existing configuration.
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    UpperCamelCase :Dict = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    UpperCamelCase :Optional[Any] = {
        """compute_environment""": """LOCAL_MACHINE""",
        """mixed_precision""": mixed_precision,
    }
    # Pick the distributed type from whichever accelerator backend is present.
    if torch.cuda.is_available():
        UpperCamelCase :Union[str, Any] = torch.cuda.device_count()
        UpperCamelCase :List[Any] = num_gpus
        UpperCamelCase :Dict = False
        if num_gpus > 1:
            UpperCamelCase :Any = """MULTI_GPU"""
        else:
            UpperCamelCase :Any = """NO"""
    elif is_xpu_available() and use_xpu:
        UpperCamelCase :Optional[Any] = torch.xpu.device_count()
        UpperCamelCase :Optional[int] = num_xpus
        UpperCamelCase :int = False
        if num_xpus > 1:
            UpperCamelCase :Union[str, Any] = """MULTI_XPU"""
        else:
            UpperCamelCase :Union[str, Any] = """NO"""
    elif is_npu_available():
        UpperCamelCase :List[Any] = torch.npu.device_count()
        UpperCamelCase :Optional[Any] = num_npus
        UpperCamelCase :Tuple = False
        if num_npus > 1:
            UpperCamelCase :Optional[Any] = """MULTI_NPU"""
        else:
            UpperCamelCase :List[Any] = """NO"""
    else:
        # CPU-only fallback.
        UpperCamelCase :Any = 0
        UpperCamelCase :Optional[Any] = True
        UpperCamelCase :Optional[Any] = 1
        UpperCamelCase :List[str] = """NO"""
    UpperCamelCase :int = ClusterConfig(**__magic_name__ )
    config.to_json_file(__magic_name__ )
    return path


def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Tuple ) -> List[str]:
    # Originally default_command_parser(parser, parents): register the
    # `default` sub-command with its --config_file / --mixed_precision flags.
    """simple docstring"""
    UpperCamelCase :Dict = parser.add_parser("""default""" , parents=__magic_name__ , help=__magic_name__ , formatter_class=__magic_name__ )
    parser.add_argument(
        """--config_file""" ,
        default=__magic_name__ ,
        help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) ,
        dest="""save_location""" ,
    )
    parser.add_argument(
        """--mixed_precision""" ,
        choices=["""no""", """fp16""", """bf16"""] ,
        type=__magic_name__ ,
        help="""Whether or not to use mixed precision training. """
        """Choose between FP16 and BF16 (bfloat16) training. """
        """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" ,
        default="""no""" ,
    )
    parser.set_defaults(func=__magic_name__ )
    return parser


def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] ) -> List[str]:
    # Originally default_config_command(args): run the write and report where
    # the configuration was saved.
    """simple docstring"""
    UpperCamelCase :Optional[Any] = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""" )
38
1
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN (Sequential - building the model layer by layer).
    classifier = models.Sequential()

    # Step 1 - Convolution.
    # 64x64 is the height/width of the dataset images, 3 is the RGB channel
    # count; (3, 3) is the kernel (filter) size.
    # NOTE: the original called ``layers.ConvaD`` / ``layers.MaxPoolingaD``,
    # which do not exist in Keras -- the correct layer names are
    # ``Conv2D`` / ``MaxPooling2D``.
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling.
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer.
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening.
    classifier.add(layers.Flatten())

    # Step 4 - Full connection.
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN.
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images.
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # ``fit_generator`` was removed in TF 2.x; ``fit`` accepts generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions.
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid head outputs a probability in [0, 1]; threshold at 0.5
    # instead of comparing a float to exactly 0 or 1 (which almost never
    # matches and left ``prediction`` possibly unset).
    if result[0][0] >= 0.5:
        prediction = "Abnormality detected"
    else:
        prediction = "Normal"
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ : str = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Tuple = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Any = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
38
1
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float: """simple docstring""" UpperCamelCase :int = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: UpperCamelCase :List[str] = 1 - (matter_density + radiation_density + dark_energy) UpperCamelCase :Optional[Any] = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) UpperCamelCase :str = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation UpperCAmelCase_ : Dict = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
38
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): snake_case__ : Tuple = ShapEImgaImgPipeline snake_case__ : Optional[Any] = ["""image"""] snake_case__ : Union[str, Any] = ["""image"""] snake_case__ : Optional[Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] snake_case__ : List[str] = False @property def _A ( self : Any ): return 32 @property def _A ( self : Any ): return 32 @property def _A ( self : Optional[Any] ): return self.time_input_dim * 4 @property def _A ( self : Union[str, Any] ): return 8 @property def _A ( self : int ): torch.manual_seed(0 ) UpperCamelCase :Union[str, Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) UpperCamelCase :Optional[int] = CLIPVisionModel(__lowerCamelCase ) return model @property def _A ( self : str ): UpperCamelCase :Optional[int] = CLIPImageProcessor( crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , ) return image_processor @property def _A ( self : Tuple ): torch.manual_seed(0 ) UpperCamelCase :Dict = { 
"""num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """embedding_proj_norm_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCamelCase :int = PriorTransformer(**__lowerCamelCase ) return model @property def _A ( self : Optional[int] ): torch.manual_seed(0 ) UpperCamelCase :str = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCamelCase :List[str] = ShapERenderer(**__lowerCamelCase ) return model def _A ( self : str ): UpperCamelCase :int = self.dummy_prior UpperCamelCase :Any = self.dummy_image_encoder UpperCamelCase :Dict = self.dummy_image_processor UpperCamelCase :List[Any] = self.dummy_renderer UpperCamelCase :int = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , ) UpperCamelCase :Optional[Any] = { """prior""": prior, """image_encoder""": image_encoder, """image_processor""": image_processor, """renderer""": renderer, """scheduler""": scheduler, } return components def _A ( self : int , __lowerCamelCase : int , __lowerCamelCase : Any=0 ): UpperCamelCase :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) if str(__lowerCamelCase ).startswith("""mps""" ): UpperCamelCase :List[Any] = torch.manual_seed(__lowerCamelCase ) else: UpperCamelCase :Optional[int] = 
torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) UpperCamelCase :Optional[Any] = { """image""": input_image, """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def _A ( self : List[str] ): UpperCamelCase :Dict = """cpu""" UpperCamelCase :List[Any] = self.get_dummy_components() UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase ) UpperCamelCase :int = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) UpperCamelCase :Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) ) UpperCamelCase :Dict = output.images[0] UpperCamelCase :List[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase :Dict = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _A ( self : List[Any] ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _A ( self : List[Any] ): UpperCamelCase :str = torch_device == """cpu""" UpperCamelCase :int = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , ) def _A ( self : List[Any] ): UpperCamelCase :List[Any] = self.get_dummy_components() UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase ) UpperCamelCase :List[Any] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) UpperCamelCase :Any = 1 UpperCamelCase :int = 2 UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase :str = batch_size * [inputs[key]] UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0] 
assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _A ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _A ( self : Any ): UpperCamelCase :Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" ) UpperCamelCase :Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_img2img_out.npy""" ) UpperCamelCase :Union[str, Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" ) UpperCamelCase :List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) UpperCamelCase :Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 ) UpperCamelCase :Optional[int] = pipe( __lowerCamelCase , generator=__lowerCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
38
1
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Tuple = (DDIMParallelScheduler,) snake_case__ : Dict = (("""eta""", 0.0), ("""num_inference_steps""", 5_0)) def _A ( self : Optional[Any] , **__lowerCamelCase : Any ): UpperCamelCase :Any = { """num_train_timesteps""": 1_000, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """clip_sample""": True, } config.update(**__lowerCamelCase ) return config def _A ( self : Optional[Any] , **__lowerCamelCase : List[Any] ): UpperCamelCase :Dict = self.scheduler_classes[0] UpperCamelCase :List[str] = self.get_scheduler_config(**__lowerCamelCase ) UpperCamelCase :List[str] = scheduler_class(**__lowerCamelCase ) UpperCamelCase , UpperCamelCase :int = 10, 0.0 UpperCamelCase :Any = self.dummy_model() UpperCamelCase :int = self.dummy_sample_deter scheduler.set_timesteps(__lowerCamelCase ) for t in scheduler.timesteps: UpperCamelCase :Tuple = model(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :Optional[int] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample return sample def _A ( self : Optional[int] ): for timesteps in [100, 500, 1_000]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def _A ( self : Any ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__lowerCamelCase ) UpperCamelCase :int = self.scheduler_classes[0] UpperCamelCase :List[str] = self.get_scheduler_config(steps_offset=1 ) UpperCamelCase :List[str] = scheduler_class(**__lowerCamelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def _A ( self : List[str] ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase ) def _A ( self : Tuple ): for schedule in 
["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def _A ( self : Optional[int] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def _A ( self : List[Any] ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCamelCase ) def _A ( self : List[str] ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=__lowerCamelCase ) def _A ( self : Optional[Any] ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=__lowerCamelCase ) def _A ( self : str ): self.check_over_configs(thresholding=__lowerCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=__lowerCamelCase , prediction_type=__lowerCamelCase , sample_max_value=__lowerCamelCase , ) def _A ( self : Tuple ): for t in [1, 10, 49]: self.check_over_forward(time_step=__lowerCamelCase ) def _A ( self : List[str] ): for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=__lowerCamelCase , num_inference_steps=__lowerCamelCase ) def _A ( self : Dict ): for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=__lowerCamelCase , eta=__lowerCamelCase ) def _A ( self : Optional[Any] ): UpperCamelCase :str = self.scheduler_classes[0] UpperCamelCase :Tuple = self.get_scheduler_config() UpperCamelCase :int = scheduler_class(**__lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5 assert 
torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5 def _A ( self : Dict ): UpperCamelCase :List[Any] = self.scheduler_classes[0] UpperCamelCase :Optional[Any] = self.get_scheduler_config() UpperCamelCase :List[Any] = scheduler_class(**__lowerCamelCase ) UpperCamelCase , UpperCamelCase :Optional[Any] = 10, 0.0 scheduler.set_timesteps(__lowerCamelCase ) UpperCamelCase :Any = self.dummy_model() UpperCamelCase :Union[str, Any] = self.dummy_sample_deter UpperCamelCase :Optional[Any] = self.dummy_sample_deter + 0.1 UpperCamelCase :List[str] = self.dummy_sample_deter - 0.1 UpperCamelCase :List[Any] = samplea.shape[0] UpperCamelCase :Dict = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase :List[Any] = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase ) UpperCamelCase :str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase :str = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __lowerCamelCase ) UpperCamelCase :Any = torch.sum(torch.abs(__lowerCamelCase ) ) UpperCamelCase :Tuple = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def _A ( self : List[str] ): UpperCamelCase :Tuple = self.full_loop() UpperCamelCase :Any = torch.sum(torch.abs(__lowerCamelCase ) ) UpperCamelCase :Tuple = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.223967 ) < 1E-3 def _A ( self : Optional[Any] ): UpperCamelCase :Optional[int] = self.full_loop(prediction_type="""v_prediction""" ) UpperCamelCase :List[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) UpperCamelCase :Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def _A ( self : List[str] ): # We specify different beta, so that the first 
alpha is 0.99 UpperCamelCase :int = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) UpperCamelCase :Optional[Any] = torch.sum(torch.abs(__lowerCamelCase ) ) UpperCamelCase :List[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def _A ( self : List[Any] ): # We specify different beta, so that the first alpha is 0.99 UpperCamelCase :Tuple = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 ) UpperCamelCase :Tuple = torch.sum(torch.abs(__lowerCamelCase ) ) UpperCamelCase :List[Any] = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
38
from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record UpperCAmelCase_ : int = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' UpperCAmelCase_ : Optional[Any] = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' UpperCAmelCase_ : int = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. 
Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> 
references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return float((preds == labels).mean() ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Any="binary" ) -> Dict: """simple docstring""" UpperCamelCase :List[str] = simple_accuracy(__magic_name__ , __magic_name__ ) UpperCamelCase :Dict = float(fa_score(y_true=__magic_name__ , y_pred=__magic_name__ , average=__magic_name__ ) ) return { "accuracy": acc, "f1": fa, } def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" UpperCamelCase :Optional[Any] = {} for id_pred, label in zip(__magic_name__ , __magic_name__ ): UpperCamelCase :str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" UpperCamelCase :Union[str, Any] = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: UpperCamelCase :Dict = [(pred, label)] UpperCamelCase , UpperCamelCase :Optional[int] = [], [] for question, preds_labels in question_map.items(): UpperCamelCase , UpperCamelCase :Optional[Any] = zip(*__magic_name__ ) UpperCamelCase :Optional[int] = fa_score(y_true=__magic_name__ , y_pred=__magic_name__ , average="""macro""" ) fas.append(__magic_name__ ) UpperCamelCase :int = int(sum(pred == label for pred, label in preds_labels ) == len(__magic_name__ ) ) ems.append(__magic_name__ ) UpperCamelCase :Optional[int] = float(sum(__magic_name__ ) / 
len(__magic_name__ ) ) UpperCamelCase :str = sum(__magic_name__ ) / len(__magic_name__ ) UpperCamelCase :Tuple = float(fa_score(y_true=__magic_name__ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): def _A ( self : str ): if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , ) def _A ( self : Optional[Any] ): if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def _A ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : str ): if self.config_name == "axb": 
return {"matthews_correlation": matthews_corrcoef(__lowerCamelCase , __lowerCamelCase )} elif self.config_name == "cb": return acc_and_fa(__lowerCamelCase , __lowerCamelCase , fa_avg="""macro""" ) elif self.config_name == "record": UpperCamelCase :Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] UpperCamelCase :Tuple = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(__lowerCamelCase , __lowerCamelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(__lowerCamelCase , __lowerCamelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__lowerCamelCase , __lowerCamelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
38
1
# Tests for datasets.info.DatasetInfo / DatasetInfosDict serialization.
# NOTE: every test function in the original had all its parameters named
# ``__magic_name__`` (a SyntaxError: duplicate argument) and names without
# the ``test_`` prefix, so pytest could neither compile nor collect them;
# names below are recovered from the bodies, the parametrize ids, and the
# standard pytest fixtures they consume (tmp_path / tmp_path_factory).
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict loads from README.md and/or legacy dataset_infos.json."""
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    """A DatasetInfo round-trips through write_to_directory/from_directory."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict exposes exactly _INCLUDED_INFO_IN_YAML and survives YAML."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # YAML-safe types only
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    dataset_info_yaml_dict_reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == dataset_info_yaml_dict_reloaded


def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    """A DatasetInfosDict round-trips through the README.md representation."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
38
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the hyper-parameters used to build the image processor under test
    and the random test images fed to it."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests the ViT-based image processor used by EfficientFormer on PIL,
    numpy and torch inputs."""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        # The processor must expose all configuration attributes.
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
38
1
import tensorflow as tf

from ...tf_utils import shape_list


class _SCREAMING_SNAKE_CASE(tf.keras.layers.Layer):
    """Adaptive softmax output layer (canonically ``TFAdaptiveSoftmaxMask``).

    Splits the vocabulary into a frequent "head" plus tail clusters delimited
    by ``cutoffs``; tail clusters may use smaller embeddings (``div_val`` > 1)
    projected back to ``d_proj``.  ``call`` returns log-probabilities and, when
    a target is given, registers the cross-entropy loss via ``add_loss``.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        # cutoff_ends[i]..cutoff_ends[i+1] delimits cluster i's token ids.
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head predicts shortlist tokens plus one "cluster" logit per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters

        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            # All clusters share one full-size embedding table.
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            # Each cluster gets its own, progressively smaller, embedding.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        # Optionally project hidden states, then compute logits x @ W^T + b.
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        # Pick logprob[r, target[r]] for each row r.
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            # Plain softmax over the whole vocabulary.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Positions whose target falls in this cluster.
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    # Head: shortlist tokens plus the per-cluster logits.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    # Tail logprob = cluster logprob + within-cluster logprob.
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter this cluster's NLL back to its positions.
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
38
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Reverse the byte order of a 32-char bit string (4 groups of 8 chars).

    Raises:
        ValueError: If the input is not exactly 32 chars long.
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian hex representation of the low 32 bits of *i*.

    Raises:
        ValueError: If *i* is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    # Low 32 bits as 8 hex chars, then swap byte order pairwise.
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Expand *message* into its padded MD5 bit string.

    The message is rendered as ASCII '0'/'1' chars, a single '1' bit is
    appended, then '0' bits until the length is 448 (mod 512), and finally the
    original bit-length as a 64-bit little-endian value.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block of *bit_string* as sixteen 32-bit words.

    Raises:
        ValueError: If the input length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_aa(i: int) -> int:
    """Return the bitwise NOT of *i* within 32 bits.

    Raises:
        ValueError: If *i* is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    """Return (a + b) modulo 2**32 (32-bit wraparound addition)."""
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value *i* left by *shift* bits.

    Raises:
        ValueError: If *i* or *shift* is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    # The low `shift` bits of (i << shift) are zero, so XOR acts as OR here.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of *message* as 32 lowercase hex bytes.

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    """
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    bb = 0xEFCDAB89
    cc = 0x98BADCFE
    dd = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = bb
        c = cc
        d = dd

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        bb = sum_aa(bb, b)
        cc = sum_aa(cc, c)
        dd = sum_aa(dd, d)

    digest = reformat_hex(aa) + reformat_hex(bb) + reformat_hex(cc) + reformat_hex(dd)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
1
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their Hugging Face counterparts;
# "*" is replaced by the encoder layer index at conversion time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the HF module attribute addressed by dotted *key*.

    Raises:
        AssertionError: If the target tensor's shape does not match *value*.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Transfer every fairseq state-dict tensor into *hf_model* via MAPPING."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Encoder layer index sits just before `key` in the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a fairseq conv-feature-extractor tensor into the HF extractor.

    Raises:
        AssertionError: If the target tensor's shape does not match *value*.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load a fairseq WavLM checkpoint, convert it and save an HF model.

    Args:
        checkpoint_path: Path to the original fairseq checkpoint.
        pytorch_dump_folder_path: Output directory for the converted model.
        config_path: Optional path to an HF config.json; defaults otherwise.
    """
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
38
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class _SCREAMING_SNAKE_CASE(AbstractDatasetInputStream):
    """Dataset input stream backed by a Python generator function
    (canonically ``GeneratorDatasetInputStream``)."""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that materializes the generator's examples into a dataset.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or map-style)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
38
1
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample a chunk of at most `max_length` seconds from `wav`.

    If the clip is already shorter than the requested length it is returned unchanged;
    otherwise a random window of exactly `sample_rate * max_length` samples is cut out.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature extractor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # `--freeze_feature_extractor` is deprecated: map it onto `freeze_feature_encoder`
        # semantics and reject contradictory combinations of the two flags.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    """Fine-tune an audio-classification model on a `datasets` audio dataset."""
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms (random subsampling + feature extraction) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms (feature extraction on full clips) across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Compute accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
38
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build GLUE/MRPC train and validation dataloaders tokenized for `model_name`.

    Padding is done lazily in the collate function: to a fixed length on TPU (XLA
    recompiles on shape changes), to the longest element in the batch otherwise.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train and evaluate a sequence classifier, optionally under a DeepSpeed config.

    When the active DeepSpeed config supplies its own optimizer/scheduler, the
    `DummyOptim`/`DummyScheduler` placeholders are used so `accelerator.prepare`
    can swap in the DeepSpeed-managed versions.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    """Parse CLI arguments and launch the training function."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
38
1
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the vertices reachable from `vert` in DFS post-order.

    First pass of Kosaraju's algorithm: the caller concatenates the per-vertex
    orders to obtain a global finish-time ordering. `visited` is mutated in place.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every unvisited vertex reachable from `vert` in the reversed graph.

    Second pass of Kosaraju's algorithm: each call from an unvisited root yields
    exactly one strongly connected component. `visited` is mutated in place.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of `graph` (Kosaraju's algorithm).

    `graph` maps each vertex 0..n-1 to its list of out-neighbours. The result is a
    list of components, each a list of vertex ids.
    """
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph: every edge u -> v becomes v -> u.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    # Process vertices in decreasing finish time; each fresh root closes one SCC.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
38
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the (slow) TransfoXL tokenizer."""

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Tiny hand-built vocabulary written to a temp dir so tokenization is deterministic.
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
38
1
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration

# Key-rename patterns applied in order; shared prefix/suffix rules plus
# encoder/decoder-specific rules in DECODER_PATTERNS / REMAINING_PATTERNS.
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    """Apply each (tf_name, hf_name) replacement in `patterns` to key `k`, in order."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    """Map a dict of TF checkpoint arrays onto a BigBirdPegasus PyTorch model.

    Dense/attention kernels are transposed (TF stores them [in, out]); keys listed
    in KEYS_TO_IGNORE are dropped; the shared position-embedding table is duplicated
    into the encoder and decoder slots expected by the HF model.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    """Load every variable of the TF checkpoint at `path` into a name -> numpy array dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    """Convert the TF checkpoint at `ckpt_path` and save the PyTorch model to `save_dir`."""
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
38
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps original (fairseq/unilm) parameter-name fragments to their location in
# the Hugging Face WavLMModel.  A "*" is replaced by the encoder layer index.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model rather than inside the encoder.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the HF module addressed by dotted ``key``.

    ``weight_type`` selects which tensor of the resolved module is written
    ("weight", "weight_g", "weight_v", "bias" or None for a raw parameter).
    ``full_name`` is only used for logging / error messages.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of ``fairseq_model`` into ``hf_model`` using MAPPING.

    Convolutional feature-extractor weights are dispatched to
    ``load_conv_layer``; anything that matches no mapping entry is collected
    and reported as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The text between the start of the name and the matched
                        # key fragment ends with the encoder layer index.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor into the HF model.

    ``full_name`` looks like "...conv_layers.<layer>.<type>.(weight|bias)".
    type_id 0 is the conv itself, type_id 2 a layer norm (or the group norm of
    layer 0 when ``use_group_norm``); everything else is recorded as unused.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original unilm WavLM checkpoint into a HF WavLMModel.

    Loads the original checkpoint from ``checkpoint_path``, copies its weights
    into a (freshly configured or ``config_path``-configured) WavLMModel, and
    saves the result to ``pytorch_dump_folder_path``.
    """
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
38
1
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class _SCREAMING_SNAKE_CASE : snake_case__ : List[str] = XGLMConfig snake_case__ : Optional[Any] = {} snake_case__ : Optional[int] = """gelu""" def __init__( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]=14 , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : int=True , __lowerCamelCase : Any=99 , __lowerCamelCase : Union[str, Any]=32 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : int="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : List[Any]=0.02 , ): UpperCamelCase :Tuple = parent UpperCamelCase :Optional[Any] = batch_size UpperCamelCase :List[Any] = seq_length UpperCamelCase :Union[str, Any] = is_training UpperCamelCase :Tuple = use_input_mask UpperCamelCase :List[str] = use_labels UpperCamelCase :Union[str, Any] = vocab_size UpperCamelCase :Tuple = d_model UpperCamelCase :Any = num_hidden_layers UpperCamelCase :Union[str, Any] = num_attention_heads UpperCamelCase :Optional[Any] = ffn_dim UpperCamelCase :List[Any] = activation_function UpperCamelCase :str = activation_dropout UpperCamelCase :List[str] = attention_dropout UpperCamelCase :List[str] = max_position_embeddings UpperCamelCase :Dict = initializer_range UpperCamelCase 
:List[Any] = None UpperCamelCase :str = 0 UpperCamelCase :Any = 2 UpperCamelCase :Any = 1 def _A ( self : int ): return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _A ( self : List[str] ): UpperCamelCase :Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) UpperCamelCase :List[str] = None if self.use_input_mask: UpperCamelCase :List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase :str = self.get_config() UpperCamelCase :List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _A ( self : str ): return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__lowerCamelCase , ) def _A ( self : Union[str, Any] ): UpperCamelCase :Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :Optional[Any] = config_and_inputs UpperCamelCase :str = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): snake_case__ : int = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () snake_case__ : Dict = (TFXGLMForCausalLM,) if is_tf_available() else () snake_case__ : Optional[Any] = ( {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {} ) 
snake_case__ : List[str] = False snake_case__ : Optional[int] = False snake_case__ : List[str] = False def _A ( self : List[Any] ): UpperCamelCase :Optional[Any] = TFXGLMModelTester(self ) UpperCamelCase :List[Any] = ConfigTester(self , config_class=__lowerCamelCase , n_embd=37 ) def _A ( self : int ): self.config_tester.run_common_tests() @slow def _A ( self : int ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :str = TFXGLMModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _A ( self : Dict ): super().test_resize_token_embeddings() @require_tf class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def _A ( self : Optional[Any] , __lowerCamelCase : Optional[int]=True ): UpperCamelCase :Dict = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) UpperCamelCase :str = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off UpperCamelCase :int = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581] # fmt: on UpperCamelCase :str = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase ) @slow def _A ( self : Tuple ): UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) UpperCamelCase :Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) UpperCamelCase :Optional[int] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) UpperCamelCase :str = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): UpperCamelCase :Any = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , seed=[7, 0] ) UpperCamelCase :str = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) @slow def _A ( self : int ): UpperCamelCase :List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) UpperCamelCase :Tuple = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) UpperCamelCase :str = """left""" # use different length sentences to test batching UpperCamelCase :Union[str, Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] UpperCamelCase :str = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding=__lowerCamelCase ) UpperCamelCase :Optional[int] = inputs["""input_ids"""] UpperCamelCase :Dict = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) UpperCamelCase :List[str] = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids UpperCamelCase :List[Any] = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 ) UpperCamelCase :str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids UpperCamelCase :str = model.generate(input_ids=__lowerCamelCase , max_new_tokens=12 ) UpperCamelCase :Any = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase ) UpperCamelCase :List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase ) UpperCamelCase :List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
38
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts text nodes and their xpaths from raw HTML for MarkupLM.

    Requires the `bs4` (BeautifulSoup) backend.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tag names, 1-based sibling subscripts) from root to ``element``.

        A subscript of 0 means the tag is its parent's only child of that name.
        """
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        # Collected leaf-to-root; reverse to get root-to-leaf order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse ``html_string`` and return (doc strings, xpath tags, xpath subscripts)."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join tag/subscript pairs into an xpath string like ``/html/body/div[2]``."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes and xpaths from one HTML string or a batch of them.

        Returns a BatchFeature with keys "nodes" and "xpaths" (one list per
        input string). Raises ValueError for any other input type.
        """
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
38
1
from __future__ import annotations

from math import pi


def SCREAMING_SNAKE_CASE_(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is 0.

    Exactly one argument must be 0; the returned dict maps that quantity's
    name to its computed value.  All quantities must be non-negative.

    >>> SCREAMING_SNAKE_CASE_(10, 5, 0)["reactance"] == 2 * pi * 5 * 10
    True
    >>> SCREAMING_SNAKE_CASE_(0, 0, 50)
    Traceback (most recent call last):
        ...
    ValueError: One and only one argument must be 0
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True if ``next_ver`` may extend the partial path at ``curr_ind``."""
    # 1. Validate that current and next vertices are adjacent.
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to fill ``path`` from ``curr_ind`` onward.

    ``path`` is mutated in place; unfilled slots hold -1.  Returns True when a
    full Hamiltonian cycle has been written into ``path``.
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle through ``graph`` starting (and ending) at
    ``start_index``, or an empty list if none exists.

    ``graph`` is an adjacency matrix of 0/1 entries.

    >>> hamilton_cycle([[0, 1, 0, 1, 0],
    ...                 [1, 0, 1, 1, 1],
    ...                 [0, 1, 0, 0, 1],
    ...                 [1, 1, 0, 0, 1],
    ...                 [0, 1, 1, 1, 0]])
    [0, 1, 2, 4, 3, 0]
    """
    # Initialize path with -1, representing not-yet-visited slots; the path has
    # len(graph) + 1 entries because the starting vertex appears at both ends.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
38
1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        """Create a temp dir holding a tiny CLIP vocab/merges and an image-processor config."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one small random PIL image (channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        # Nested queries are padded to the largest per-image query count.
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
38
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : str=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : str=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : Any=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : int=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]="last" , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , ): UpperCamelCase :int = parent UpperCamelCase :Optional[int] = batch_size UpperCamelCase :str = seq_length UpperCamelCase :Optional[int] = is_training UpperCamelCase :Optional[int] = 
use_input_lengths UpperCamelCase :Union[str, Any] = use_token_type_ids UpperCamelCase :List[str] = use_labels UpperCamelCase :Dict = gelu_activation UpperCamelCase :Optional[int] = sinusoidal_embeddings UpperCamelCase :List[Any] = causal UpperCamelCase :Optional[int] = asm UpperCamelCase :List[str] = n_langs UpperCamelCase :int = vocab_size UpperCamelCase :List[Any] = n_special UpperCamelCase :List[Any] = hidden_size UpperCamelCase :List[str] = num_hidden_layers UpperCamelCase :List[Any] = num_attention_heads UpperCamelCase :Tuple = hidden_dropout_prob UpperCamelCase :List[str] = attention_probs_dropout_prob UpperCamelCase :Tuple = max_position_embeddings UpperCamelCase :List[str] = type_vocab_size UpperCamelCase :Union[str, Any] = type_sequence_label_size UpperCamelCase :int = initializer_range UpperCamelCase :List[str] = num_labels UpperCamelCase :Optional[int] = num_choices UpperCamelCase :Optional[Any] = summary_type UpperCamelCase :Tuple = use_proj UpperCamelCase :Optional[Any] = scope def _A ( self : List[str] ): UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase :Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase :List[Any] = None if self.use_input_lengths: UpperCamelCase :Dict = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase :str = None if self.use_token_type_ids: UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase :Optional[int] = None UpperCamelCase :int = None UpperCamelCase :List[Any] = None if self.use_labels: UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase :List[str] = ids_tensor([self.batch_size] , 2 ).float() UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices ) 
UpperCamelCase :Union[str, Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A ( self : List[Any] ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _A ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : int , ): UpperCamelCase :Tuple = FlaubertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :int = model(__lowerCamelCase , lengths=__lowerCamelCase , langs=__lowerCamelCase ) UpperCamelCase :List[Any] = model(__lowerCamelCase , langs=__lowerCamelCase ) UpperCamelCase :int = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , ): UpperCamelCase :Any = FlaubertWithLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Dict = model(__lowerCamelCase , 
token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Any = FlaubertForQuestionAnsweringSimple(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Any = model(__lowerCamelCase ) UpperCamelCase :int = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : str , ): UpperCamelCase :str = FlaubertForQuestionAnswering(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Any = model(__lowerCamelCase ) UpperCamelCase :Optional[int] = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , p_mask=__lowerCamelCase , ) UpperCamelCase :Union[str, Any] = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , ) ((UpperCamelCase) , ) :int = result_with_labels.to_tuple() UpperCamelCase :int = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) ((UpperCamelCase) , ) 
:List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , ): UpperCamelCase :Optional[int] = FlaubertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Tuple = model(__lowerCamelCase ) UpperCamelCase :List[str] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Dict = self.num_labels UpperCamelCase :Tuple = FlaubertForTokenClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Optional[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A ( self : 
Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Union[str, Any] = self.num_choices UpperCamelCase :List[Any] = FlaubertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A ( self : str ): UpperCamelCase :List[str] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :List[Any] = config_and_inputs UpperCamelCase :Union[str, Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): snake_case__ : Optional[int] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": 
FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _A ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): UpperCamelCase :Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": UpperCamelCase :Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) UpperCamelCase :List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def _A ( self : str ): UpperCamelCase :List[Any] = FlaubertModelTester(self ) UpperCamelCase :Any = ConfigTester(self , config_class=__lowerCamelCase , emb_dim=37 ) def _A ( self : Optional[int] ): self.config_tester.run_common_tests() def _A ( self : List[Any] ): UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCamelCase ) def _A ( self : Optional[int] ): UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCamelCase ) def _A ( 
self : List[Any] ): UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCamelCase ) def _A ( self : Union[str, Any] ): UpperCamelCase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCamelCase ) def _A ( self : Optional[Any] ): UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCamelCase ) def _A ( self : Tuple ): UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCamelCase ) def _A ( self : int ): UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCamelCase ) @slow def _A ( self : Any ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Optional[int] = FlaubertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @slow @require_torch_gpu def _A ( self : Tuple ): UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return UpperCamelCase :Optional[Any] = True UpperCamelCase :Optional[Any] = model_class(config=__lowerCamelCase ) UpperCamelCase :str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :str = torch.jit.trace( __lowerCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCamelCase , os.path.join(__lowerCamelCase , """traced_model.pt""" ) ) UpperCamelCase :int = torch.jit.load(os.path.join(__lowerCamelCase , """traced_model.pt""" ) , map_location=__lowerCamelCase ) loaded(inputs_dict["""input_ids"""].to(__lowerCamelCase ) , inputs_dict["""attention_mask"""].to(__lowerCamelCase ) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def _A ( self : Optional[Any] ): UpperCamelCase :Union[str, Any] = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) UpperCamelCase :Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): UpperCamelCase :Tuple = model(__lowerCamelCase )[0] UpperCamelCase :Union[str, Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) UpperCamelCase :int = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
38
1
from sklearn.metrics import recall_score import datasets UpperCAmelCase_ : Dict = ''' Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. ''' UpperCAmelCase_ : List[Any] = ''' Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
Note that it can result in an F-score that is not between precision and recall. - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {\'recall\': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric(\'recall\') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {\'recall\': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric(\'recall\') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {\'recall\': 0.55} Example 4-A multiclass example, using different averages. 
>>> recall_metric = datasets.load_metric(\'recall\') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\') >>> print(results) {\'recall\': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'recall\': array([1., 0., 0.])} ''' UpperCAmelCase_ : Union[str, Any] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _SCREAMING_SNAKE_CASE ( datasets.Metric ): def _A ( self : Tuple ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , ) def _A ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Dict=1 , __lowerCamelCase : Union[str, Any]="binary" , __lowerCamelCase : Dict=None , __lowerCamelCase : Tuple="warn" , ): UpperCamelCase :Tuple = recall_score( __lowerCamelCase , __lowerCamelCase , labels=__lowerCamelCase , pos_label=__lowerCamelCase , average=__lowerCamelCase , sample_weight=__lowerCamelCase , zero_division=__lowerCamelCase , ) return {"recall": float(__lowerCamelCase ) if score.size == 1 else score}
38
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Any = """openai/whisper-base""" snake_case__ : Optional[int] = ( """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """ """transcribed text.""" ) snake_case__ : Any = """transcriber""" snake_case__ : Optional[int] = WhisperProcessor snake_case__ : str = WhisperForConditionalGeneration snake_case__ : Optional[Any] = ["""audio"""] snake_case__ : Any = ["""text"""] def _A ( self : str , __lowerCamelCase : Dict ): return self.pre_processor(__lowerCamelCase , return_tensors="""pt""" ).input_features def _A ( self : Dict , __lowerCamelCase : List[Any] ): return self.model.generate(inputs=__lowerCamelCase ) def _A ( self : Any , __lowerCamelCase : Optional[Any] ): return self.pre_processor.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )[0]
38
1
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase_ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCAmelCase_ : Dict = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } UpperCAmelCase_ : Any = { '''allenai/led-base-16384''': 1_63_84, } class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : int = VOCAB_FILES_NAMES snake_case__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : int = LEDTokenizer snake_case__ : str = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : int=None , __lowerCamelCase : Any="replace" , __lowerCamelCase : Union[str, Any]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Any="<pad>" , __lowerCamelCase : Tuple="<mask>" , __lowerCamelCase : List[str]=False , __lowerCamelCase : List[Any]=True , **__lowerCamelCase : str , ): super().__init__( __lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , errors=__lowerCamelCase , 
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase , **__lowerCamelCase , ) UpperCamelCase :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __lowerCamelCase ) != add_prefix_space: UpperCamelCase :Union[str, Any] = getattr(__lowerCamelCase , pre_tok_state.pop("""type""" ) ) UpperCamelCase :Optional[Any] = add_prefix_space UpperCamelCase :Tuple = pre_tok_class(**__lowerCamelCase ) UpperCamelCase :Optional[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCamelCase :str = """post_processor""" UpperCamelCase :List[str] = getattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) if tokenizer_component_instance: UpperCamelCase :Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase :int = tuple(state["""sep"""] ) if "cls" in state: UpperCamelCase :Optional[int] = tuple(state["""cls"""] ) UpperCamelCase :Optional[Any] = False if state.get("""add_prefix_space""" , __lowerCamelCase ) != add_prefix_space: UpperCamelCase :Optional[Any] = add_prefix_space UpperCamelCase :Union[str, Any] = True if state.get("""trim_offsets""" , __lowerCamelCase ) != trim_offsets: UpperCamelCase :Tuple = trim_offsets UpperCamelCase :Tuple = True if changes_to_apply: UpperCamelCase :Tuple = getattr(__lowerCamelCase , state.pop("""type""" ) ) UpperCamelCase :int = component_class(**__lowerCamelCase ) setattr(self.backend_tokenizer , __lowerCamelCase , __lowerCamelCase ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _A ( self : Dict ): if 
self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _A ( self : Dict , __lowerCamelCase : List[str] ): UpperCamelCase :Dict = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else value UpperCamelCase :List[Any] = value def _A ( self : Union[str, Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[int] ): UpperCamelCase :Optional[int] = kwargs.get("""is_split_into_words""" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def _A ( self : Tuple , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : str ): UpperCamelCase :Tuple = kwargs.get("""is_split_into_words""" , __lowerCamelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase ) def _A ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): UpperCamelCase :List[str] = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase ) def _A ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any]=None ): UpperCamelCase :List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _A ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): UpperCamelCase :Dict = [self.sep_token_id] 
UpperCamelCase :List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _A ( self : Tuple , __lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): UpperCamelCase :List[Any] = super()._pad( encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase :List[Any] = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase :str = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase :Union[str, Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(__lowerCamelCase ) if needs_to_be_padded: UpperCamelCase :Optional[int] = len(__lowerCamelCase ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase :Optional[Any] = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase :Union[str, Any] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
38
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


# Task template for automatic speech recognition datasets: maps an input
# "audio" column to an output "transcription" column.
# NOTE(review): `_a` (both the `frozen=` argument and the base class) is not
# defined in this chunk -- presumably `frozen=True` and base `TaskTemplate`;
# confirm against the original file.
@dataclass(frozen=_a )
class _SCREAMING_SNAKE_CASE ( _a ):
    # Task identifier; kept in asdict() output even when left at its default.
    snake_case__ : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    # Expected input schema: a single audio column.
    snake_case__ : ClassVar[Features] = Features({"""audio""": Audio()} )
    # Expected output schema: a single string transcription column.
    snake_case__ : ClassVar[Features] = Features({"""transcription""": Value("""string""" )} )
    snake_case__ : str = "audio"
    snake_case__ : str = "transcription"

    def _A ( self : List[str] , __lowerCamelCase : Dict ):
        # Align this template with a dataset's concrete features: validate the
        # audio column, then return a copy whose input schema uses the
        # dataset's own Audio feature (e.g. to carry its sampling rate).
        # NOTE(review): the body reads `features`, but the parameter was
        # renamed to `__lowerCamelCase`; likewise `self.audio_column` and the
        # local names below (`task_template`, `input_schema`) do not match the
        # mangled assignment targets -- restore the original names upstream.
        if self.audio_column not in features:
            raise ValueError(F"""Column {self.audio_column} is not present in features.""" )
        if not isinstance(features[self.audio_column] , __lowerCamelCase ):
            raise ValueError(F"""Column {self.audio_column} is not an Audio type.""" )
        UpperCamelCase :int = copy.deepcopy(self )
        UpperCamelCase :Any = self.input_schema.copy()
        UpperCamelCase :List[str] = features[self.audio_column]
        UpperCamelCase :List[Any] = input_schema
        return task_template

    @property
    def _A ( self : Optional[int] ):
        # Mapping from dataset column names to their canonical task roles.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
38
1
def SCREAMING_SNAKE_CASE_(number: int) -> bool:
    """Return True iff ``number`` is a perfect number.

    A perfect number equals the sum of its proper divisors,
    e.g. ``6 == 1 + 2 + 3`` and ``28 == 1 + 2 + 4 + 7 + 14``.

    Args:
        number: The integer to test.

    Returns:
        True when ``number`` is perfect, False otherwise (including for
        all non-positive inputs).
    """
    # Guard: without this, 0 would be wrongly reported as perfect, because
    # the divisor sum below is an empty sum (0) and 0 == 0.
    if number < 1:
        return False
    # Proper divisors never exceed number // 2, so only scan that far.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


# Readable alias; the __main__ block below originally called this name,
# which was otherwise undefined after the automated renaming.
perfect = SCREAMING_SNAKE_CASE_


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
38
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Importing this subpackage must never hard-fail: when either torch or
    # transformers is missing, raise and fall through to the dummy objects,
    # which surface a helpful error only when actually used.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    # Real implementations, only imported when all dependencies are present.
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
38
1
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    """Check whether ``next_ver`` can extend the partial Hamiltonian path.

    Args:
        graph: Adjacency matrix (1 = edge, 0 = no edge).
        next_ver: Candidate vertex to place at position ``curr_ind``.
        curr_ind: Index in ``path`` being filled.
        path: Partial path; unfilled slots hold -1.

    Returns:
        True when the previous vertex connects to ``next_ver`` and
        ``next_ver`` is not already on the path.
    """
    # 1. Validate that current and next vertex are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking helper that fills ``path`` in place.

    Returns True when ``path`` has been completed into a Hamiltonian cycle.
    """
    if curr_ind == len(graph):
        # All vertices placed: the cycle closes iff the last placed vertex
        # connects back to the starting vertex.
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Try every vertex as the next step in the path.
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Find a Hamiltonian cycle in ``graph`` starting (and ending) at ``start_index``.

    Args:
        graph: Adjacency matrix of an undirected graph.
        start_index: Vertex at which the cycle must begin and end.

    Returns:
        The cycle as a list of ``len(graph) + 1`` vertices (first == last),
        or an empty list when no Hamiltonian cycle exists.
    """
    path = [-1] * (len(graph) + 1)
    # Initialize start and end of path with the starting index.  (The
    # obfuscated original lost this chained assignment, rebinding the whole
    # list instead and leaving every helper name undefined.)
    path[0] = path[-1] = start_index
    # Evaluate; if we find an answer return the path, otherwise an empty list.
    return path if util_hamilton_cycle(graph, path, 1) else []


# Preserve the module-level name the obfuscated file exported (its last def).
SCREAMING_SNAKE_CASE_ = hamilton_cycle
38
import re
import string

import numpy as np

import datasets


# NOTE(review): the three module-level constants below are all bound to the
# same mangled name ``UpperCAmelCase_``; the class underneath refers to
# ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` / ``_CITATION``, which are
# therefore unresolved in this chunk -- restore the original constant names.
UpperCAmelCase_ : Dict = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''

UpperCAmelCase_ : Any = '''
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
'''

UpperCAmelCase_ : Tuple = '''
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _A ( self : Optional[int] ):
        # Declare the metric's schema: one predicted string and one reference
        # string per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                }
            ) ,
            reference_urls=[] ,
        )

    def _A ( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : int=False , __lowerCamelCase : Optional[int]=False , ):
        # Compute the exact-match rate after the optional normalizations.
        # NOTE(review): the parameters were mangled to ``__lowerCamelCase``;
        # the body reads ``predictions`` / ``references`` /
        # ``regexes_to_ignore`` / ``ignore_case`` / ``ignore_punctuation`` /
        # ``ignore_numbers``, which are unresolved here, and every assignment
        # target was mangled to ``UpperCamelCase`` -- restore the original
        # names upstream.
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                UpperCamelCase :str = np.array([re.sub(__lowerCamelCase , """""" , __lowerCamelCase ) for x in predictions] )
                UpperCamelCase :Tuple = np.array([re.sub(__lowerCamelCase , """""" , __lowerCamelCase ) for x in references] )
        else:
            UpperCamelCase :Any = np.asarray(__lowerCamelCase )
            UpperCamelCase :str = np.asarray(__lowerCamelCase )

        if ignore_case:
            UpperCamelCase :Tuple = np.char.lower(__lowerCamelCase )
            UpperCamelCase :Any = np.char.lower(__lowerCamelCase )

        if ignore_punctuation:
            # Translation table that deletes all ASCII punctuation.
            UpperCamelCase :Optional[int] = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            UpperCamelCase :Optional[Any] = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )
            UpperCamelCase :List[str] = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )

        if ignore_numbers:
            # Translation table that deletes all decimal digits.
            UpperCamelCase :Tuple = string.digits.maketrans("""""" , """""" , string.digits )
            UpperCamelCase :Dict = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )
            UpperCamelCase :Tuple = np.char.translate(__lowerCamelCase , table=__lowerCamelCase )

        # Element-wise equality, averaged and scaled to a 0-100 percentage.
        UpperCamelCase :int = predictions == references
        return {"exact_match": np.mean(__lowerCamelCase ) * 100}
38
1
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum of any contiguous subarray of ``arr``.

    Implements Kadane's algorithm in O(n) time and O(1) extra space.

    Args:
        arr: Sequence of numbers (may be empty).
        allow_empty_subarrays: When True, the empty subarray (sum 0) is a
            valid answer, so the result is never negative.

    Returns:
        The best subarray sum; 0 for an empty input.
    """
    if not arr:
        return 0

    # Best sum seen so far; with empty subarrays allowed it can never drop
    # below 0, otherwise start from -inf so any element can take over.
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0  # best sum of a subarray ending at the current element
    for num in arr:
        # Either extend the running subarray or restart at `num`
        # (restart at 0/empty when empty subarrays are allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


# Preserve the obfuscated module-level name as an alias.
SCREAMING_SNAKE_CASE_ = max_subarray_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
38
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


UpperCAmelCase_ : Dict = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config file.
UpperCAmelCase_ : str = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}


# Configuration class for LayoutLMv3.
# NOTE(review): the base class placeholder ``_a`` is undefined in this chunk
# (presumably PretrainedConfig), every __init__ parameter was mangled to
# ``__lowerCamelCase`` while the attribute assignments read the original
# parameter names (max_ad_position_embeddings, coordinate_size, ...), and the
# assignment targets were mangled to ``UpperCamelCase`` -- restore upstream.
class _SCREAMING_SNAKE_CASE ( _a ):
    # Model type string used by the AutoConfig registry.
    snake_case__ : Optional[int] = """layoutlmv3"""

    def __init__( self : List[Any] , __lowerCamelCase : Optional[Any]=50_265 , __lowerCamelCase : Dict=768 , __lowerCamelCase : Any=12 , __lowerCamelCase : int=12 , __lowerCamelCase : str=3_072 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=512 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Union[str, Any]=1E-5 , __lowerCamelCase : Any=1 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Dict=1_024 , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : str=128 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : str=32 , __lowerCamelCase : List[Any]=128 , __lowerCamelCase : str=64 , __lowerCamelCase : List[str]=256 , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=224 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Dict=16 , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[Any] , ):
        # Forward the shared text-transformer hyperparameters to the base
        # PretrainedConfig-style constructor.
        super().__init__(
            vocab_size=__lowerCamelCase ,
            hidden_size=__lowerCamelCase ,
            num_hidden_layers=__lowerCamelCase ,
            num_attention_heads=__lowerCamelCase ,
            intermediate_size=__lowerCamelCase ,
            hidden_act=__lowerCamelCase ,
            hidden_dropout_prob=__lowerCamelCase ,
            attention_probs_dropout_prob=__lowerCamelCase ,
            max_position_embeddings=__lowerCamelCase ,
            type_vocab_size=__lowerCamelCase ,
            initializer_range=__lowerCamelCase ,
            layer_norm_eps=__lowerCamelCase ,
            pad_token_id=__lowerCamelCase ,
            bos_token_id=__lowerCamelCase ,
            eos_token_id=__lowerCamelCase ,
            **__lowerCamelCase ,
        )
        # LayoutLMv3-specific settings: 2D layout embeddings, relative
        # attention biases, and the visual (patch) branch.
        UpperCamelCase :int = max_ad_position_embeddings
        UpperCamelCase :Tuple = coordinate_size
        UpperCamelCase :List[Any] = shape_size
        UpperCamelCase :Union[str, Any] = has_relative_attention_bias
        UpperCamelCase :Any = rel_pos_bins
        UpperCamelCase :Optional[Any] = max_rel_pos
        UpperCamelCase :str = has_spatial_attention_bias
        UpperCamelCase :Tuple = rel_ad_pos_bins
        UpperCamelCase :Optional[int] = max_rel_ad_pos
        UpperCamelCase :Tuple = text_embed
        UpperCamelCase :str = visual_embed
        UpperCamelCase :Optional[Any] = input_size
        UpperCamelCase :str = num_channels
        UpperCamelCase :List[Any] = patch_size
        UpperCamelCase :Optional[Any] = classifier_dropout


# ONNX export configuration for LayoutLMv3.
# NOTE(review): base-class placeholder ``_a`` is presumably OnnxConfig.
class _SCREAMING_SNAKE_CASE ( _a ):
    # Minimum ONNX opset/torch version required for the export.
    snake_case__ : int = version.parse("""1.12""" )

    @property
    def _A ( self : Optional[int] ):
        # Input axes declaration.
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
                ]
            )

    @property
    def _A ( self : str ):
        # Absolute tolerance used when validating the exported model.
        return 1E-5

    @property
    def _A ( self : Dict ):
        # Default ONNX opset.
        return 12

    def _A ( self : Dict , __lowerCamelCase : "ProcessorMixin" , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 40 , __lowerCamelCase : int = 40 , ):
        # Build dummy (text + boxes + image) inputs for tracing the export.
        # NOTE(review): body references ``processor`` / ``batch_size`` /
        # ``seq_length`` etc. while the parameters are all ``__lowerCamelCase``
        # -- restore the original parameter names upstream.
        setattr(processor.image_processor , """apply_ocr""" , __lowerCamelCase )

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        UpperCamelCase :Optional[Any] = compute_effective_axis_dimension(
            __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCamelCase :Optional[int] = processor.tokenizer.num_special_tokens_to_add(__lowerCamelCase )
        UpperCamelCase :int = compute_effective_axis_dimension(
            __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
        # Generate dummy inputs according to compute batch and sequence
        UpperCamelCase :Any = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        UpperCamelCase :Optional[Any] = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        UpperCamelCase :List[str] = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

        UpperCamelCase :Any = dict(
            processor(
                __lowerCamelCase ,
                text=__lowerCamelCase ,
                boxes=__lowerCamelCase ,
                return_tensors=__lowerCamelCase ,
            )
        )

        return inputs
1
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    # Stand-in so the module still imports when PIL is unavailable.
    class _SCREAMING_SNAKE_CASE :
        @staticmethod
        def _A ( *__lowerCamelCase : Tuple , **__lowerCamelCase : str ):
            pass


# Pipeline tests for zero-shot object detection.
# NOTE(review): throughout this class, assignment targets were mangled to
# ``UpperCamelCase`` while the bodies read the original local names
# (``object_detector``, ``examples``, ``outputs``, ``n`` ...), which are
# therefore unresolved here -- restore the original names upstream.
@is_pipeline_test
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    snake_case__ : Any = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def _A ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple ):
        # Build a tiny pipeline plus one example input for the generic
        # pipeline-test harness.
        UpperCamelCase :Any = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        UpperCamelCase :Tuple = [
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """candidate_labels""": ["""cat""", """remote""", """couch"""],
            }
        ]
        return object_detector, examples

    def _A ( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple ):
        # Generic shape check: with threshold 0 every detection has the
        # expected score/label/box structure.
        UpperCamelCase :Tuple = object_detector(examples[0] , threshold=0.0 )
        UpperCamelCase :Optional[Any] = len(__lowerCamelCase )
        self.assertGreater(__lowerCamelCase , 0 )
        self.assertEqual(
            __lowerCamelCase ,
            [
                {
                    """score""": ANY(__lowerCamelCase ),
                    """label""": ANY(__lowerCamelCase ),
                    """box""": {"""xmin""": ANY(__lowerCamelCase ), """ymin""": ANY(__lowerCamelCase ), """xmax""": ANY(__lowerCamelCase ), """ymax""": ANY(__lowerCamelCase )},
                }
                for i in range(__lowerCamelCase )
            ] ,
        )

    @require_tf
    @unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def _A ( self : Dict ):
        pass

    @require_torch
    def _A ( self : List[str] ):
        # Small (tiny random) model: pin exact scores/boxes for single and
        # batched inputs.
        UpperCamelCase :Tuple = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        UpperCamelCase :Tuple = object_detector(
            """./tests/fixtures/tests_samples/COCO/000000039769.png""" ,
            candidate_labels=["""cat""", """remote""", """couch"""] ,
            threshold=0.64 ,
        )
        self.assertEqual(
            nested_simplify(__lowerCamelCase , decimals=4 ) ,
            [
                {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
                {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
                {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
                {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
                {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
                {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
                {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
                {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
                {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
            ] ,
        )

        UpperCamelCase :Any = object_detector(
            [
                {
                    """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                    """candidate_labels""": ["""cat""", """remote""", """couch"""],
                }
            ] ,
            threshold=0.64 ,
        )
        self.assertEqual(
            nested_simplify(__lowerCamelCase , decimals=4 ) ,
            [
                [
                    {"""score""": 0.7235, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
                    {"""score""": 0.7218, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
                    {"""score""": 0.7184, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}},
                    {"""score""": 0.6748, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
                    {"""score""": 0.6656, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
                    {"""score""": 0.6614, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}},
                    {"""score""": 0.6456, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
                    {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}},
                    {"""score""": 0.6419, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}},
                ]
            ] ,
        )

    @require_torch
    @slow
    def _A ( self : Optional[Any] ):
        # Full-size default model, single image and batched pair.
        UpperCamelCase :Optional[int] = pipeline("""zero-shot-object-detection""" )
        UpperCamelCase :Union[str, Any] = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" ,
            candidate_labels=["""cat""", """remote""", """couch"""] ,
        )
        self.assertEqual(
            nested_simplify(__lowerCamelCase , decimals=4 ) ,
            [
                {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
                {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
                {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
                {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
                {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
            ] ,
        )

        UpperCamelCase :List[str] = object_detector(
            [
                {
                    """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
                    """candidate_labels""": ["""cat""", """remote""", """couch"""],
                },
                {
                    """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
                    """candidate_labels""": ["""cat""", """remote""", """couch"""],
                },
            ] ,
        )
        self.assertEqual(
            nested_simplify(__lowerCamelCase , decimals=4 ) ,
            [
                [
                    {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
                    {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
                    {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
                    {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
                    {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
                ],
                [
                    {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
                    {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
                    {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
                    {"""score""": 0.1474, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}},
                    {"""score""": 0.1208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}},
                ],
            ] ,
        )

    @require_tf
    @unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def _A ( self : List[Any] ):
        pass

    @require_torch
    @slow
    def _A ( self : str ):
        # Higher threshold keeps only the top three detections.
        UpperCamelCase :Dict = 0.2
        UpperCamelCase :Optional[Any] = pipeline("""zero-shot-object-detection""" )
        UpperCamelCase :Optional[Any] = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" ,
            candidate_labels=["""cat""", """remote""", """couch"""] ,
            threshold=__lowerCamelCase ,
        )
        self.assertEqual(
            nested_simplify(__lowerCamelCase , decimals=4 ) ,
            [
                {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
                {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
                {"""score""": 0.2537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}},
            ] ,
        )

    @require_torch
    @slow
    def _A ( self : Optional[int] ):
        # top_k truncates the result list to the best two detections.
        UpperCamelCase :int = 2
        UpperCamelCase :List[str] = pipeline("""zero-shot-object-detection""" )
        UpperCamelCase :Union[str, Any] = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" ,
            candidate_labels=["""cat""", """remote""", """couch"""] ,
            top_k=__lowerCamelCase ,
        )
        self.assertEqual(
            nested_simplify(__lowerCamelCase , decimals=4 ) ,
            [
                {"""score""": 0.2868, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}},
                {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}},
            ] ,
        )
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# Fast (tiny-model) tests for the Stable Diffusion XL img2img pipeline.
# NOTE(review): the base-class placeholders ``_a`` are presumably
# PipelineLatentTesterMixin and PipelineTesterMixin; throughout the class,
# assignment targets were mangled to ``UpperCamelCase`` while bodies read the
# original local names (``sd_pipe``, ``inputs``, ``image_slice`` ...), which
# are unresolved here, and the annotated tuple-unpack in the prompt-embeds
# test is not valid Python as written -- restore the original code upstream.
class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
    snake_case__ : Any = StableDiffusionXLImgaImgPipeline
    snake_case__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    snake_case__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
    snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def _A ( self : int ):
        # Build tiny deterministic components (UNet/scheduler/VAE/two CLIP
        # text encoders) for fast CPU tests.
        torch.manual_seed(0 )
        UpperCamelCase :Any = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,
            attention_head_dim=(2, 4) ,
            use_linear_projection=__lowerCamelCase ,
            addition_embed_type="""text_time""" ,
            addition_time_embed_dim=8 ,
            transformer_layers_per_block=(1, 2) ,
            projection_class_embeddings_input_dim=80 ,
            cross_attention_dim=64 ,
        )
        UpperCamelCase :Tuple = EulerDiscreteScheduler(
            beta_start=0.00085 ,
            beta_end=0.012 ,
            steps_offset=1 ,
            beta_schedule="""scaled_linear""" ,
            timestep_spacing="""leading""" ,
        )
        torch.manual_seed(0 )
        UpperCamelCase :Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,
            latent_channels=4 ,
            sample_size=128 ,
        )
        torch.manual_seed(0 )
        UpperCamelCase :Optional[int] = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=1_000 ,
            hidden_act="""gelu""" ,
            projection_dim=32 ,
        )
        UpperCamelCase :Any = CLIPTextModel(__lowerCamelCase )
        UpperCamelCase :List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
        UpperCamelCase :List[Any] = CLIPTextModelWithProjection(__lowerCamelCase )
        UpperCamelCase :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def _A ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=0 ):
        # Seeded dummy image + generator + call kwargs for the pipeline.
        UpperCamelCase :Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
        UpperCamelCase :List[str] = image / 2 + 0.5
        if str(__lowerCamelCase ).startswith("""mps""" ):
            UpperCamelCase :Any = torch.manual_seed(__lowerCamelCase )
        else:
            UpperCamelCase :List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        UpperCamelCase :str = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs

    def _A ( self : str ):
        # End-to-end tiny run on CPU: pin output shape and a corner slice.
        UpperCamelCase :List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase :Optional[Any] = self.get_dummy_components()
        UpperCamelCase :List[Any] = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
        UpperCamelCase :Any = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )

        UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = sd_pipe(**__lowerCamelCase ).images
        UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        UpperCamelCase :List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self : Dict ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def _A ( self : Optional[Any] ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def _A ( self : Union[str, Any] ):
        pass

    def _A ( self : Optional[int] ):
        # Passing precomputed prompt embeddings must match passing raw prompts.
        UpperCamelCase :Union[str, Any] = self.get_dummy_components()
        UpperCamelCase :Dict = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
        UpperCamelCase :List[Any] = sd_pipe.to(__lowerCamelCase )
        UpperCamelCase :List[str] = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )

        # forward without prompt embeds
        UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :int = 3 * ["""this is a negative prompt"""]
        UpperCamelCase :Union[str, Any] = negative_prompt
        UpperCamelCase :Union[str, Any] = 3 * [inputs["""prompt"""]]

        UpperCamelCase :Dict = sd_pipe(**__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :Optional[int] = 3 * ["""this is a negative prompt"""]
        UpperCamelCase :Union[str, Any] = 3 * [inputs.pop("""prompt""" )]

        (
            (
                UpperCamelCase
            ) ,
            (
                UpperCamelCase
            ) ,
            (
                UpperCamelCase
            ) ,
            (
                UpperCamelCase
            ) ,
        ) :Union[str, Any] = sd_pipe.encode_prompt(__lowerCamelCase , negative_prompt=__lowerCamelCase )

        UpperCamelCase :Dict = sd_pipe(
            **__lowerCamelCase ,
            prompt_embeds=__lowerCamelCase ,
            negative_prompt_embeds=__lowerCamelCase ,
            pooled_prompt_embeds=__lowerCamelCase ,
            negative_pooled_prompt_embeds=__lowerCamelCase ,
        )
        UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4


# Slow integration test against a real hosted checkpoint (GPU only).
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def _A ( self : Tuple ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict="cpu" , __lowerCamelCase : List[Any]=torch.floataa , __lowerCamelCase : Tuple=0 ):
        # Fixed latents + generator so the slow run is reproducible.
        UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
        UpperCamelCase :Dict = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
        UpperCamelCase :str = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def _A ( self : Optional[Any] ):
        # Full pipeline run; pin output shape and a flattened corner slice.
        UpperCamelCase :Any = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )

        UpperCamelCase :Optional[Any] = self.get_inputs(__lowerCamelCase )
        UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase ).images
        UpperCamelCase :Dict = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)

        UpperCamelCase :Union[str, Any] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
38
1
def pancake_sort(arr: list) -> list:
    """Sort *arr* with pancake sort and return the sorted list.

    Each pass finds the maximum of the unsorted prefix, flips it to the
    front, then flips the whole prefix so the maximum lands at its final
    position. Handles empty and single-element lists trivially.

    Fixes: the original's assignment targets were corrupted to
    ``UpperCamelCase :T`` placeholders (NameError on ``cur``/``mi``) and the
    def name did not match the ``pancake_sort`` call in the main guard.
    """
    cur = len(arr)
    while cur > 1:
        # Index of the maximum element in the unsorted prefix arr[0:cur].
        mi = arr.index(max(arr[0:cur]))
        # Flip arr[0..mi] so the maximum moves to the front.
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip arr[0..cur-1] so the maximum moves to position cur-1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


# Backward-compatible alias for the (corrupted) original def name.
SCREAMING_SNAKE_CASE_ = pancake_sort

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
38
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration class for a Trajectory Transformer model.

    Stores the hyper-parameters used to instantiate the model. Defaults match
    the ``halfcheetah-medium-v2`` checkpoint.

    Fixes: the base class had been corrupted to an undefined ``_a``, the three
    class attributes all shared the name ``snake_case__`` (each shadowing the
    previous), and every ``self.<attr> = ...`` assignment target in
    ``__init__`` had been replaced by a throwaway local.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
38
1
from __future__ import annotations


class IIRFilter:
    """N-th order infinite-impulse-response filter (direct form I).

    Fixes: in the corrupted original both methods were named ``_A`` (the
    coefficient setter was shadowed and unreachable) and the history-shift
    assignment targets in ``process`` had been replaced by throwaway locals,
    so the filter kept no state.
    """

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}: denominator coefficients (a_0 normalizes output).
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}: numerator coefficients.
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]: most recent inputs, newest first.
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]: most recent outputs, newest first.
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set filter coefficients; a_coeffs may omit the leading a_0 = 1.0.

        Raises ValueError if either list does not have order + 1 elements.
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Feed one input sample through the filter and return the output."""
        result = 0.0
        # Start at index 1; the b_0 * x[n] term is applied at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift histories: newest value goes to the front, oldest drops off.
        self.input_history = [sample, *self.input_history[:-1]]
        self.output_history = [result, *self.output_history[:-1]]
        return result


# Backward-compatible alias for the (corrupted) original class name.
_SCREAMING_SNAKE_CASE = IIRFilter
38
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum Fourier transform circuit.

    Returns the measurement counts from 10000 shots on the qasm simulator.

    Raises:
        TypeError: if number_of_qubits is a string.
        ValueError: if number_of_qubits is <= 0, not an exact integer, or > 10.

    Fixes: the original type guard was ``isinstance(x, x)`` — isinstance with
    a non-type second argument raises TypeError unconditionally — and the
    register/circuit assignment targets were corrupted placeholders.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(counter):
        # Hadamard on the highest remaining qubit, then controlled phases.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse qubit order with swaps to match the conventional QFT output.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=1_0000)
    return job.result().get_counts(quantum_circuit)


# Backward-compatible alias for the (corrupted) original def name.
SCREAMING_SNAKE_CASE_ = quantum_fourier_transform

if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
38
1
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below *n*.

    Project Euler problem 1. The original def name was corrupted and did not
    match the ``solution()`` call in the main guard (NameError).
    """
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


# Backward-compatible alias for the (corrupted) original def name.
SCREAMING_SNAKE_CASE_ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
38
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Minimal Keras model wrapping an in-graph tokenizer + tiny BERT,
        used to verify that the tokenizer survives SavedModel round-trips."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    """Checks TFBertTokenizer against the reference Python BertTokenizer.

    Fixes applied to the corrupted original: instance-attribute assignment
    targets restored (``self.tokenizers`` etc.), the lost module constants
    (``TOKENIZER_CHECKPOINTS``, ``TINY_MODEL_CHECKPOINT``) re-referenced, the
    invalid ``tf.intaa`` dtype restored to ``tf.int64``, and the duplicated
    method names replaced with descriptive test names.
    """

    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif  those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        # The in-graph tokenizer must produce identical ids/shapes to the
        # reference Python tokenizer for single and paired inputs.
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        # Passing pre-paired tuples must match passing text/text_pair lists.
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        # tf.function-compiled tokenization must match eager execution.
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        # A model embedding the tokenizer must survive save/load round-trips.
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is
                # compiled, so we need an epsilon for the test.
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
38
1
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    """Tests for FlaxAutoModel loading and its error messages.

    Fixes applied to the corrupted original: local assignment targets
    restored, duplicated method names replaced with descriptive ones, and the
    exception class in the assertRaisesRegex calls (corrupted to an undefined
    name) restored to EnvironmentError, which from_pretrained raises for
    missing repos/revisions/files.
    """

    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                # NOTE(review): the original isinstance check was corrupted;
                # the model-class check below is the load-correctness signal.
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
38
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a basic Accelerate cluster config.

    Detects the available accelerator (CUDA, then XPU if requested, then NPU,
    else CPU) and writes a JSON config. Returns the written path, or False if
    a config already exists at *save_location* (never overwrites).

    Raises ValueError for an unknown mixed-precision mode.

    Fixes: the corrupted original assigned every config entry to a throwaway
    local instead of the config dict, so nothing was actually written.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        config["distributed_type"] = "MULTI_GPU" if num_gpus > 1 else "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        config["distributed_type"] = "MULTI_XPU" if num_xpus > 1 else "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        config["distributed_type"] = "MULTI_NPU" if num_npus > 1 else "NO"
    else:
        # No accelerator found: single CPU process.
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    """Register the `default` subcommand on *parser* and return it."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    """Entry point for `accelerate config default`: write and report the config."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
38
1
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    """Build a MobileViTConfig matching the named original checkpoint.

    Fixes: the corrupted original assigned every config field to a throwaway
    local instead of the config object.
    """
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name, base_model=False):
    """Map an original MobileViT state-dict key to the HF naming scheme."""
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                # Downsampling convs only exist in the later stages.
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    """Rename all keys and split fused qkv weights into query/key/value.

    Fixes: the corrupted original wrote the split tensors to throwaway locals
    instead of back into the state dict.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original MobileViT checkpoint to HF format, verify its
    outputs against known logits, save it, and optionally push to the hub."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
38
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import table: submodule name -> public symbols it exports.
# Backend-specific entries are only registered when the backend is importable,
# so `import transformers.models.opt` stays cheap and never hard-fails on a
# missing optional dependency.
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves attributes on demand instead.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy; fixes the original code, which
    # assigned the _LazyModule to a throwaway name and referenced an
    # undefined `_import_structure` (NameError at import time).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
38
1
from __future__ import annotations


def SCREAMING_SNAKE_CASE_(__magic_name__: list[int]) -> bool:
    """Return True when every element of the list occurs exactly once.

    Equivalent to comparing ``len(set(xs))`` with ``len(xs)``, but short-circuits
    as soon as the first duplicate is seen.
    """
    seen: set[int] = set()
    for item in __magic_name__:
        if item in seen:
            # Second occurrence found — the elements are not all distinct.
            return False
        seen.add(item)
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class _SCREAMING_SNAKE_CASE(PipelineTesterMixin, unittest.TestCase):
    """Fast unit tests for the Shap-E image-to-image pipeline.

    The original base class `_a` was undefined; PipelineTesterMixin is imported
    above and is the only plausible mixin (the class sets pipeline_class /
    batch_params, which the mixin consumes).
    """

    # Class attributes were all obfuscated to `snake_case__`; names restored
    # from in-class usage (`self.pipeline_class`, `self.batch_params`) and the
    # PipelineTesterMixin convention — NOTE(review): confirm `params`,
    # `required_optional_params` and `test_gradient_checkpointing` against the
    # mixin's expected attribute names.
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        # Deterministic tiny CLIP vision tower for reproducible tests.
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        # The boolean flags were destroyed in the refactor (`__lowerCamelCase`);
        # True matches the standard CLIP preprocessing — TODO confirm.
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble all tiny sub-models into the pipeline's component dict."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        # Scheduler flag values were lost in the refactor — True matches the
        # Shap-E reference configuration; TODO confirm.
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImgaImgPipelineSlowTests(unittest.TestCase):
    """GPU integration test against the published openai/shap-e-img2img weights.

    Renamed: the original reused `_SCREAMING_SNAKE_CASE`, silently clobbering
    the fast-test class above.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
38
1
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class _SCREAMING_SNAKE_CASE(TestCase):
    """Tests for RagRetriever against canonical, custom and legacy indexes.

    The original base class `_a` was undefined; TestCase is imported above and
    otherwise unused, so it is restored as the base. Method names are restored
    from the in-class call sites (e.g. `self.get_dpr_tokenizer()`); test
    methods get the `test_` prefix unittest requires to discover them.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        # Two documents whose embeddings are all-ones and all-twos, so inner
        # products against the query vectors are easy to reason about.
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            # NOTE(review): the obfuscated code assigned `dataset` to a
            # throwaway name here; wiring it through the mock's return_value is
            # the only way the patched load_dataset can serve it — confirm.
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            # NOTE(review): these two paths were assigned to throwaway names in
            # the obfuscated code; config.passages_path / config.index_path are
            # the fields RagRetriever reads for an on-disk custom index — confirm.
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [
                    np.ones(self.retrieval_vector_size + 1),
                    2 * np.ones(self.retrieval_vector_size + 1),
                ],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        # NOTE(review): the obfuscated asserts compared a value with itself;
        # without return_tensors the retriever returns plain lists — confirm.
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
38
from sklearn.metrics import fa_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = '''\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
'''

_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''

_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:
    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
'''


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to their labels (expects array-likes with .mean())."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    """Return accuracy and F1 (with the given sklearn averaging mode) as a dict."""
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC scoring: per-question exact match / macro-F1, plus overall answer-level F1.

    `ids_preds` are dicts carrying `idx.paragraph`/`idx.question` plus a
    `prediction` label; answers to the same question are grouped before scoring.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        # Exact match: every answer of the question must be predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """SuperGLUE metric; the subset is selected via `config_name`."""

    def _info(self):
        # `_info` / `_compute` are the hook names datasets.Metric invokes;
        # restored from the obfuscated `_A` collisions.
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        # record/multirc take structured inputs; all other subsets take ints.
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
38
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure for the RemBERT model family.  The configuration is
# always importable; tokenizer and model entries are appended below only when
# the corresponding optional backend (sentencepiece / tokenizers / torch / tf)
# is installed.  Previously the conditional lists were assigned to throwaway
# names, so they never reached the import structure, and the _LazyModule was
# never installed into sys.modules — both are fixed here.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy backends above are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
38
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the hyper-parameters used to build processor configs and image inputs.

    Previously every `self.*` assignment was replaced by a throwaway local, the
    class name was lost (it is referenced by `setUp` below), and the duplicated
    placeholder parameter names made the signature a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE(ImageProcessingSavingTestMixin, unittest.TestCase):
    # ViTImageProcessor doubles as the EfficientFormer image processor.
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
38
1
import json
import os

import torch

# NOTE(review): the broken source imported "UNetaDModel", which does not exist
# in diffusers; the 1D temporal UNet class is UNet1DModel.
from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    """Convert the temporal-UNet checkpoint with horizon `hor` (32 or 128) to diffusers format.

    Loads the original torch model, builds an equivalent UNet1DModel, copies the
    weights over key-by-key, and saves the state dict plus its JSON config under
    hub/hopper-medium-v2/unet/hor{hor}/.
    """
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # Both models enumerate their parameters in the same order, so a positional
    # zip of the two key sets yields the old-key -> new-key mapping.
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    """Convert the value-function checkpoint (horizon 32) to diffusers format."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    # This checkpoint is saved as a plain state dict (no wrapping nn.Module).
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
38
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-character bit string from big- to little-endian byte order.

    Works on 8-character (one byte) groups: the four groups are emitted in
    reverse order, matching MD5's little-endian word layout.

    Raises:
        ValueError: if the input is not exactly 32 characters long.
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Return the little-endian 8-character hexadecimal representation of `i`.

    Only the lowest 32 bits of `i` are used.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad `message` (expressed as an ASCII '0'/'1' bit string) to a multiple of 512 bits.

    Appends a single '1' bit, then '0' bits until the length is 448 mod 512,
    then the original bit length as a 64-bit little-endian value (RFC 1321).
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block of `bit_string` as sixteen 32-bit integer words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_aa(i: int) -> int:
    """Bitwise NOT of `i`, restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    """Add two integers with 32-bit wrap-around (mod 2**32)."""
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value `i` left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as 32 lowercase hex characters (bytes).

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    >>> md5_me(b"The quick brown fox jumps over the lazy dog")
    b'9e107d9d372bb6826bd81d3542a419d6'
    """
    bit_string = preprocess(message)

    # K[i] = floor(2**32 * |sin(i + 1)|), the per-round additive constants.
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
1
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Download the standard COCO validation image used to sanity-check vision conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits produced by the original checkpoint."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new` in `dct` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Return (old_key, new_key) pairs mapping original checkpoint keys to HF-model keys."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert an original SwiftFormer checkpoint to HF format, verify its logits, and save it."""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
38
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class _SCREAMING_SNAKE_CASE(AbstractDatasetInputStream):
    """Dataset input stream backed by a Python generator function.

    The broken source subclassed the undefined name `_a`, duplicated every
    parameter name (a SyntaxError), never assigned `self.builder`, and the
    read hook referenced undefined locals — all restored here.
    """

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Builder that materializes (or streams) the dataset from `generator`.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset ("train" split) from the generator."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
38
1
import faiss  # noqa: F401  # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401  # Here to have a nice missing dependency error message early on
import requests  # noqa: F401  # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401  # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401  # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year      = {2021}
}
"""

_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).

This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""

_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each predictions
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric('mauve')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
    >>> print(out.mauve) # doctest: +SKIP
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """Thin `datasets.Metric` wrapper around the official mauve-text implementation.

    The broken source bound the description strings to throwaway names (the
    decorator above then raised NameError) and defined both hooks as `_A`, so
    the `_info`/`_compute` overrides required by `datasets.Metric` were lost.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        # Delegate entirely to the reference implementation; predictions are the
        # "P" text and references the "Q" text.
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
38
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


# Batch-size defaults used by the original accelerate example templates.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC, tokenized for ``model_name``.

    Args:
        accelerator: the :class:`~accelerate.Accelerator`; only used to decide
            the padding strategy (fixed-length padding on TPU).
        batch_size: per-device batch size for both splits.
        model_name: model id or path used to load the tokenizer.

    Returns:
        A ``(train_dataloader, eval_dataloader)`` tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    raw_datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = raw_datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train and evaluate a sequence-classification model on MRPC.

    Supports both a plain AdamW/linear-warmup setup and a DeepSpeed config
    that supplies its own optimizer/scheduler (via ``DummyOptim`` /
    ``DummyScheduler`` placeholders). Writes per-epoch accuracies to
    ``all_results.json`` in ``args.output_dir``.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace (``model_name_or_path``, ``output_dir``,
            ``performance_lower_bound``, ``num_epochs``).
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless the DeepSpeed config already
    # defines one, in which case a placeholder is required by accelerate.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (placeholder if DeepSpeed provides its own).
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    # Fail loudly if a CI-provided lower bound on accuracy is not met.
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    """Parse CLI arguments and launch :func:`training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
38
1
import unittest

from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class AlbertModelTester:
    """Builds tiny ALBERT configs/inputs and checks each head's output shapes."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids/masks/labels plus a matching tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three call signatures (with/without mask and token types).
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Tile every input along a new "choices" dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the extra pretraining labels ALBERT's pretraining head expects."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Regression values for the first hidden states of albert-base-v2.
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
38
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the (word-level) Transformer-XL tokenizer."""

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Tiny vocabulary written to a temp file so from_pretrained() works.
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        # Moses-style tokenization escapes punctuation inside numbers/words
        # with @-@ / @,@ / @.@ markers and must round-trip back to the text.
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
38
1
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    """Process-global switch: when ``backend_name`` is set (via
    :func:`parallel_backend`), joblib is used instead of multiprocessing."""

    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply ``single_map_nested_func`` over ``iterable`` with ``num_proc`` workers.

    Dispatches to a ``multiprocessing.Pool`` by default, or to the joblib
    backend selected through :func:`parallel_backend`.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split ``iterable`` into contiguous slices, map them in a process pool and flatten."""
    # Never spawn more workers than there are items.
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        # The first `mod` slices get one extra element each.
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share tqdm's lock with the workers so progress bars don't interleave.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map each item through joblib using the backend registered in ParallelBackendConfig."""
    # joblib is an optional dependency, so it is imported lazily here.
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Context manager that routes ``parallel_map`` through a joblib backend.

    Args:
        backend_name: name of the joblib backend (e.g. ``"spark"``).
    """
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        # Always restore the default multiprocessing path on exit.
        ParallelBackendConfig.backend_name = None
38
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./                   # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their HF WavLM counterparts.
# A "*" is replaced by the encoder layer index at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module attribute addressed by dotted `key`.

    `weight_type` selects which tensor of the target module to set
    ("weight", "weight_g", "weight_v", "bias" or None for the module itself).
    Raises AssertionError when the shapes do not match.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy all tensors of `fairseq_model` into `hf_model`, logging leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor into `feature_extractor`."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into a HF `WavLMModel` and save it."""
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
38
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration for an I-BERT model.

    Stores the architecture hyper-parameters (vocabulary size, hidden size,
    number of layers/heads, dropout rates, ...) plus the I-BERT-specific
    quantization switches `quant_mode` and `force_dequant`.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # I-BERT specific: run in integer-quantized mode / selectively disable it.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the ONNX graph inputs for this task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
38
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends


# NOTE(review): "bsa"/"is_bsa_available" look like mangled "bs4" names — the
# backend string below is "bs4"; confirm against the project's utils module.
if is_bsa_available():
    import bsa
    from bsa import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts text nodes and their XPaths from HTML strings for MarkupLM."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk up from `element` collecting tag names and 1-based sibling
        subscripts (0 when the tag is an only child) along the path to the root."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bsa.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (text nodes, their tag paths, their subscripts)."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            # Exact type check on purpose: excludes NavigableString subclasses
            # such as Comment/Doctype.
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Render a tag/subscript path as an XPath string, e.g. "/html/body/div[2]"."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings):
        """Extract nodes and xpaths from one HTML string or a batch of them.

        Returns a `BatchFeature` with keys "nodes" and "xpaths", each a list
        (per example) of lists (per text node).
        Raises ValueError for any input that is not str / list/tuple of str.
        """
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
38
1
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint to a PyTorch state dict.

    Builds a `RemBertModel` from `rembert_config_file`, loads the TF weights
    from `tf_checkpoint_path`, and saves the state dict to `pytorch_dump_path`.
    """
    # Initialise PyTorch model from the JSON config.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
38
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True if `next_ver` can extend the partial Hamiltonian `path`.

    A vertex is valid when it is adjacent to the last vertex in the path and
    has not been visited yet.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to fill `path` from position `curr_ind` on.

    Mutates `path` in place; returns True when a Hamiltonian cycle was found.
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Search a Hamiltonian cycle in the adjacency-matrix `graph`.

    Returns the cycle as a vertex list starting and ending at `start_index`,
    or an empty list when no such cycle exists.
    """
    # Initialize path with -1, indicating that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
38
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


# NOTE(review): "botoa"/"is_botoa_available" look like mangled "boto3" names —
# confirm against the project's utils.imports module.
if is_botoa_available():
    import botoa  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role for SageMaker training jobs (idempotent).

    Attaches a permission policy covering SageMaker, ECR, CloudWatch, logs and
    S3; an already-existing role is reused with a notice.
    """
    iam_client = botoa.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Return the ARN of an existing IAM role."""
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    """Interactively collect a `SageMakerConfig` from the user.

    Asks for credentials, IAM role, Docker image, input/metrics files,
    distribution mode, torch dynamo options, instance type, machine count and
    mixed precision. Credentials are exported via environment variables.
    """
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    # Asked unconditionally so `aws_region` is always defined for the config below.
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    eca_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type = _ask_options(
            eca_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type = _ask_field(eca_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    # NOTE(review): kwarg name `eca_instance_type` kept as in this file's
    # SageMakerConfig usage; upstream accelerate calls it `ec2_instance_type` —
    # verify against .config_args.
    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        eca_instance_type=eca_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
38
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : str=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : str=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : Any=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : int=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]="last" , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , ): UpperCamelCase :int = parent UpperCamelCase :Optional[int] = batch_size UpperCamelCase :str = seq_length UpperCamelCase :Optional[int] = is_training UpperCamelCase :Optional[int] = 
use_input_lengths UpperCamelCase :Union[str, Any] = use_token_type_ids UpperCamelCase :List[str] = use_labels UpperCamelCase :Dict = gelu_activation UpperCamelCase :Optional[int] = sinusoidal_embeddings UpperCamelCase :List[Any] = causal UpperCamelCase :Optional[int] = asm UpperCamelCase :List[str] = n_langs UpperCamelCase :int = vocab_size UpperCamelCase :List[Any] = n_special UpperCamelCase :List[Any] = hidden_size UpperCamelCase :List[str] = num_hidden_layers UpperCamelCase :List[Any] = num_attention_heads UpperCamelCase :Tuple = hidden_dropout_prob UpperCamelCase :List[str] = attention_probs_dropout_prob UpperCamelCase :Tuple = max_position_embeddings UpperCamelCase :List[str] = type_vocab_size UpperCamelCase :Union[str, Any] = type_sequence_label_size UpperCamelCase :int = initializer_range UpperCamelCase :List[str] = num_labels UpperCamelCase :Optional[int] = num_choices UpperCamelCase :Optional[Any] = summary_type UpperCamelCase :Tuple = use_proj UpperCamelCase :Optional[Any] = scope def _A ( self : List[str] ): UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase :Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase :List[Any] = None if self.use_input_lengths: UpperCamelCase :Dict = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase :str = None if self.use_token_type_ids: UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase :Optional[int] = None UpperCamelCase :int = None UpperCamelCase :List[Any] = None if self.use_labels: UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase :List[str] = ids_tensor([self.batch_size] , 2 ).float() UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices ) 
UpperCamelCase :Union[str, Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A ( self : List[Any] ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _A ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : int , ): UpperCamelCase :Tuple = FlaubertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :int = model(__lowerCamelCase , lengths=__lowerCamelCase , langs=__lowerCamelCase ) UpperCamelCase :List[Any] = model(__lowerCamelCase , langs=__lowerCamelCase ) UpperCamelCase :int = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , ): UpperCamelCase :Any = FlaubertWithLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Dict = model(__lowerCamelCase , 
token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Any = FlaubertForQuestionAnsweringSimple(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Any = model(__lowerCamelCase ) UpperCamelCase :int = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : str , ): UpperCamelCase :str = FlaubertForQuestionAnswering(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Any = model(__lowerCamelCase ) UpperCamelCase :Optional[int] = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , p_mask=__lowerCamelCase , ) UpperCamelCase :Union[str, Any] = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , ) ((UpperCamelCase) , ) :int = result_with_labels.to_tuple() UpperCamelCase :int = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) ((UpperCamelCase) , ) 
:List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , ): UpperCamelCase :Optional[int] = FlaubertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Tuple = model(__lowerCamelCase ) UpperCamelCase :List[str] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Dict = self.num_labels UpperCamelCase :Tuple = FlaubertForTokenClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Optional[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A ( self : 
Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Union[str, Any] = self.num_choices UpperCamelCase :List[Any] = FlaubertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A ( self : str ): UpperCamelCase :List[str] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :List[Any] = config_and_inputs UpperCamelCase :Union[str, Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): snake_case__ : Optional[int] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": 
FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _A ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): UpperCamelCase :Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": UpperCamelCase :Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) UpperCamelCase :List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def _A ( self : str ): UpperCamelCase :List[Any] = FlaubertModelTester(self ) UpperCamelCase :Any = ConfigTester(self , config_class=__lowerCamelCase , emb_dim=37 ) def _A ( self : Optional[int] ): self.config_tester.run_common_tests() def _A ( self : List[Any] ): UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCamelCase ) def _A ( self : Optional[int] ): UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCamelCase ) def _A ( 
self : List[Any] ): UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCamelCase ) def _A ( self : Union[str, Any] ): UpperCamelCase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCamelCase ) def _A ( self : Optional[Any] ): UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCamelCase ) def _A ( self : Tuple ): UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCamelCase ) def _A ( self : int ): UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCamelCase ) @slow def _A ( self : Any ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Optional[int] = FlaubertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @slow @require_torch_gpu def _A ( self : Tuple ): UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return UpperCamelCase :Optional[Any] = True UpperCamelCase :Optional[Any] = model_class(config=__lowerCamelCase ) UpperCamelCase :str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :str = torch.jit.trace( __lowerCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCamelCase , os.path.join(__lowerCamelCase , """traced_model.pt""" ) ) UpperCamelCase :int = torch.jit.load(os.path.join(__lowerCamelCase , """traced_model.pt""" ) , map_location=__lowerCamelCase ) loaded(inputs_dict["""input_ids"""].to(__lowerCamelCase ) , inputs_dict["""attention_mask"""].to(__lowerCamelCase ) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def _A ( self : Optional[Any] ): UpperCamelCase :Union[str, Any] = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) UpperCamelCase :Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): UpperCamelCase :Tuple = model(__lowerCamelCase )[0] UpperCamelCase :Union[str, Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) UpperCamelCase :int = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
38
1
from __future__ import annotations

# Moves expressed as [dx, dy] offsets.  The index of the move used to reach a
# cell is stored in the action grid and later used to walk the path backwards.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A* search over a 2-D grid.

    Cells containing 0 are free; cells containing 1 are obstacles.

    Args:
        grid: the map to search.
        init: starting cell as ``[row, col]``.
        goal: target cell as ``[row, col]``.
        cost: uniform cost of every single move.
        heuristic: per-cell estimated distance to the goal.

    Returns:
        ``(path, action)`` where ``path`` is the list of cells from ``init``
        to ``goal`` and ``action`` maps each visited cell to the index of the
        DIRECTIONS move that reached it.

    Raises:
        ValueError: if no path from ``init`` to ``goal`` exists.
    """
    # Closed set: 1 marks cells already queued/expanded.  The start counts.
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0  # cost accumulated from the starting cell
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]  # open list; cheapest f is popped first

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:
            # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    # Reconstruct the path by walking backwards from the goal along the
    # recorded actions until we reach the starting cell.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]  # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
38
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    """Pipeline tool that transcribes an audio sample to text with Whisper."""

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn the raw audio into Whisper input features (PyTorch tensors)."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run autoregressive generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated token ids back to a single transcription string.

        skip_special_tokens drops Whisper's task/language control tokens.
        """
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
38
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCAmelCase_ : Tuple = { '''vocab_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt''' ), '''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''', '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt''' ), '''bert-base-cased-finetuned-mrpc''': ( 
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt''' ), '''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''', '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''', '''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''', '''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''', '''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''', '''bert-base-multilingual-uncased''': ( '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json''' ), '''bert-base-multilingual-cased''': ( '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json''' ), '''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''', '''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''', '''bert-large-uncased-whole-word-masking''': ( '''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json''' ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( 
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( '''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json''' ), '''bert-base-cased-finetuned-mrpc''': ( '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-cased''': ( '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json''' ), '''bert-base-german-dbmdz-uncased''': ( '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-cased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json''' ), '''TurkuNLP/bert-base-finnish-uncased-v1''': ( '''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json''' ), '''wietsedv/bert-base-dutch-cased''': ( '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json''' ), }, } UpperCAmelCase_ : Optional[int] = { '''bert-base-uncased''': 5_12, '''bert-large-uncased''': 5_12, '''bert-base-cased''': 5_12, '''bert-large-cased''': 5_12, '''bert-base-multilingual-uncased''': 5_12, '''bert-base-multilingual-cased''': 5_12, '''bert-base-chinese''': 5_12, '''bert-base-german-cased''': 5_12, '''bert-large-uncased-whole-word-masking''': 5_12, '''bert-large-cased-whole-word-masking''': 5_12, '''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12, '''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12, '''bert-base-cased-finetuned-mrpc''': 5_12, '''bert-base-german-dbmdz-cased''': 5_12, '''bert-base-german-dbmdz-uncased''': 5_12, '''TurkuNLP/bert-base-finnish-cased-v1''': 5_12, '''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12, '''wietsedv/bert-base-dutch-cased''': 5_12, } UpperCAmelCase_ : List[str] = { '''bert-base-uncased''': {'''do_lower_case''': 
True}, '''bert-large-uncased''': {'''do_lower_case''': True}, '''bert-base-cased''': {'''do_lower_case''': False}, '''bert-large-cased''': {'''do_lower_case''': False}, '''bert-base-multilingual-uncased''': {'''do_lower_case''': True}, '''bert-base-multilingual-cased''': {'''do_lower_case''': False}, '''bert-base-chinese''': {'''do_lower_case''': False}, '''bert-base-german-cased''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False}, '''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True}, '''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False}, '''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False}, '''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True}, '''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False}, '''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True}, '''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False}, } class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Tuple = VOCAB_FILES_NAMES snake_case__ : List[str] = PRETRAINED_VOCAB_FILES_MAP snake_case__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION snake_case__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : List[str] = BertTokenizer def __init__( self : Optional[Any] , __lowerCamelCase : str=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple="[UNK]" , __lowerCamelCase : Dict="[SEP]" , __lowerCamelCase : Any="[PAD]" , __lowerCamelCase : str="[CLS]" , __lowerCamelCase : Dict="[MASK]" , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Union[str, Any] , ): super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , 
unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , ) UpperCamelCase :Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __lowerCamelCase ) != do_lower_case or normalizer_state.get("""strip_accents""" , __lowerCamelCase ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __lowerCamelCase ) != tokenize_chinese_chars ): UpperCamelCase :Union[str, Any] = getattr(__lowerCamelCase , normalizer_state.pop("""type""" ) ) UpperCamelCase :Optional[int] = do_lower_case UpperCamelCase :Tuple = strip_accents UpperCamelCase :str = tokenize_chinese_chars UpperCamelCase :str = normalizer_class(**__lowerCamelCase ) UpperCamelCase :Optional[int] = do_lower_case def _A ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any=None ): UpperCamelCase :Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _A ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): UpperCamelCase :Optional[int] = [self.sep_token_id] UpperCamelCase :Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _A ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): UpperCamelCase :Dict = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase )
38
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class _SCREAMING_SNAKE_CASE(TaskTemplate):
    """Task template for automatic speech recognition (audio -> transcription).

    Repairs applied to the mangled source: ``@dataclass(frozen=_a)`` and the
    base class ``_a`` were undefined placeholders (``TaskTemplate`` is imported
    right above), the five fields were all named ``snake_case__`` (each would
    have overwritten the previous), and the method bodies referenced
    ``features``/``task_template``/``input_schema`` that were never bound.
    """

    # Keep `task` in asdict() output even when it equals the default, so the
    # template round-trips through dataset metadata.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's Audio feature.

        Raises:
            ValueError: if `audio_column` is missing from `features` or is not an Audio type.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write through __dict__ to override the
        # class-level schema on this instance only.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the template's canonical column names."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
38
1
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images for `class_prompt`.

    Queries the LAION-400M kNN service, growing the requested result count by
    1.5x until enough hits are returned (capped at 1e4), then downloads the
    images into ``{class_data_dir}/images`` and records captions/urls/paths.

    Repairs applied to the mangled source: both functions were named
    ``SCREAMING_SNAKE_CASE_`` with duplicate ``__magic_name__`` parameters
    (a SyntaxError) while the ``__main__`` guard called ``parse_args`` /
    ``retrieve``; locals are restored from the surviving references.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Already downloaded enough images on a previous run — nothing to do.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the service returns enough candidates.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload is a decodable image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip unreachable/corrupt candidates.
                continue
    return


def parse_args():
    """Parse CLI arguments: --class_prompt, --class_data_dir, --num_class_images."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
38
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
38
1
"""Streamlit demo: Long Form Question Answering with ELI5.

Repairs applied to the mangled source: every function declared duplicate
``__magic_name__`` parameters (a SyntaxError) and every assignment targeted
``UpperCAmelCase_``/``UpperCamelCase`` while the bodies read the real names,
so nothing resolved. Identifier corruption (``eli5``->``elia``,
``s2s``->``sas``, ``wiki40b``->``wikiaab``, ``Seq2Seq``->``SeqaSeq``) is
reversed; names are restored from the surviving keyword arguments and strings.
"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch

# NOTE(review): the mangled source said "elia_utils"; these helpers
# (qa_s2s_generate etc.) come from the ELI5 demo utilities module — confirm
# the module name on disk.
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    """Load the RetriBERT question encoder and the BART/T5 answer generator."""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passages + dense GPU faiss index and the ES client."""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and its dense question index."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples nearest to `question`."""
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages and build the `question: ... context: ...` doc."""
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one answer for `question_doc` with the seq2seq model."""
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NOTE(review): `support_list` is read from module scope (set in the
    # button handler below) — preserved from the original demo's behavior.
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

# Generation defaults (overridden by the sidebar sliders below).
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search,
    or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicated, capped at 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
38
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore
        when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between
    0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """Exact-match rate between predictions and references (0.0-100.0).

    Repairs applied to the mangled source: the module constants were named
    ``UpperCAmelCase_`` while the decorator and ``MetricInfo`` referenced
    ``_DESCRIPTION``/``_KWARGS_DESCRIPTION``/``_CITATION``; both methods were
    named ``_A`` (the `datasets.Metric` API calls ``_info``/``_compute``); and
    ``_compute`` declared duplicate ``__lowerCamelCase`` parameters while the
    body read the real names.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip the ignore-regexes first, then apply the normalization flags.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
38
1
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
    author = "Lin, Chin-Yew  and
      Och, Franz Josef",
    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
    month = "aug 23{--}aug 27",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://www.aclweb.org/anthology/C04-1072",
    pages = "501--507",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """Corpus BLEU via the TensorFlow NMT reference implementation.

    Repairs applied to the mangled source: the module constants were named
    ``UpperCAmelCase_`` while the decorator and ``MetricInfo`` referenced
    ``_DESCRIPTION``/``_CITATION``/``_KWARGS_DESCRIPTION``; both methods were
    named ``_A`` (the `datasets.Metric` API calls ``_info``/``_compute``);
    and ``_compute`` declared duplicate ``__lowerCamelCase`` parameters.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
38
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    """Configuration for LayoutLMv3 models.

    Repairs applied to the mangled source: both classes in this file were
    named ``_SCREAMING_SNAKE_CASE`` (the second shadowed the first), the base
    class was the undefined placeholder ``_a`` (``PretrainedConfig`` is
    imported above), ``__init__`` declared ~30 duplicate ``__lowerCamelCase``
    parameters (a SyntaxError), and the attribute assignments referenced
    corrupted names (``max_ad``/``rel_ad`` for ``max_2d``/``rel_2d``).
    Parameter names/defaults are restored from the surviving references.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        # Text-transformer hyperparameters are handled by the base config.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Layout (2D position) and visual-patch hyperparameters.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    """ONNX export configuration for LayoutLMv3."""

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy (text, bbox, image) inputs for ONNX export tracing."""
        # OCR would make the dummy inputs non-deterministic; disable it.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
38
1
import re
import string

import numpy as np

import datasets


# Fixes over the mangled original: the three module docstring constants are
# restored to the names the code references (`_DESCRIPTION`, `_KWARGS_DESCRIPTION`,
# `_CITATION`), the colliding `_A` methods are restored to the `datasets.Metric`
# contract names `_info`/`_compute`, the duplicated `__lowerCamelCase` parameter
# names (a SyntaxError) are replaced by the real parameter names, and the
# `ignore_numbers` help text no longer claims it removes "punctuation".
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """Exact-match rate between predictions and references (0.0-100.0)."""

    def _info(self):
        # Declares the metric's schema: one predicted string per reference string.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": rate} after applying the requested normalizations.

        Normalization order: regex removal first, then case folding, then
        punctuation stripping, then digit stripping — matching the documented
        behavior in _KWARGS_DESCRIPTION.
        """
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            # Translation table that deletes every punctuation character.
            repmap = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        if ignore_numbers:
            # Translation table that deletes every ASCII digit.
            repmap = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repmap)
            references = np.char.translate(references, table=repmap)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
38
# NOTE(review): this file appears machine-mangled — every method is named `_A`
# (so later defs shadow earlier ones), several signatures repeat the parameter
# name `__lowerCamelCase` (a SyntaxError), and many identifiers referenced in
# bodies (`unet`, `image`, `generator`, `sd_pipe`, ...) are never bound because
# the assignment targets were all renamed to `UpperCamelCase`. Code tokens are
# preserved as found; comments only document the evident intent.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# Fast (CPU, tiny-model) tests for the Stable Diffusion XL img2img pipeline.
class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
    # Pipeline class under test plus the parameter sets exercised by the mixins.
    snake_case__ : Any = StableDiffusionXLImgaImgPipeline
    snake_case__ : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
    snake_case__ : Tuple = PipelineTesterMixin.required_optional_params - {"""latents"""}
    snake_case__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS

    # Builds tiny randomly-initialised components (unet / scheduler / vae / two
    # text encoders + tokenizers) so pipeline construction is fast.
    def _A ( self : int ):
        torch.manual_seed(0 )
        UpperCamelCase :Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__lowerCamelCase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        UpperCamelCase :Tuple = EulerDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
        torch.manual_seed(0 )
        UpperCamelCase :Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        UpperCamelCase :Optional[int] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
        UpperCamelCase :Any = CLIPTextModel(__lowerCamelCase )
        UpperCamelCase :List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
        UpperCamelCase :List[Any] = CLIPTextModelWithProjection(__lowerCamelCase )
        UpperCamelCase :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    # Deterministic dummy inputs (image + prompt + generator) for one call.
    def _A ( self : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=0 ):
        UpperCamelCase :Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
        UpperCamelCase :List[str] = image / 2 + 0.5
        if str(__lowerCamelCase ).startswith("""mps""" ):
            UpperCamelCase :Any = torch.manual_seed(__lowerCamelCase )
        else:
            UpperCamelCase :List[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        UpperCamelCase :str = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs

    # End-to-end smoke test on CPU against a hard-coded output slice.
    def _A ( self : str ):
        UpperCamelCase :List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase :Optional[Any] = self.get_dummy_components()
        UpperCamelCase :List[Any] = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
        UpperCamelCase :Any = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = sd_pipe(**__lowerCamelCase ).images
        UpperCamelCase :Union[str, Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase :List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self : Dict ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )

    def _A ( self : Optional[Any] ):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def _A ( self : Union[str, Any] ):
        pass

    # Checks that pre-computed prompt embeddings reproduce the plain-prompt output.
    # NOTE(review): the annotated tuple-unpacking target below
    # (`(...) :Union[str, Any] = ...`) is not valid Python — mangling artifact.
    def _A ( self : Optional[int] ):
        UpperCamelCase :Union[str, Any] = self.get_dummy_components()
        UpperCamelCase :Dict = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
        UpperCamelCase :List[Any] = sd_pipe.to(__lowerCamelCase )
        UpperCamelCase :List[str] = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        # forward without prompt embeds
        UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :int = 3 * ["""this is a negative prompt"""]
        UpperCamelCase :Union[str, Any] = negative_prompt
        UpperCamelCase :Union[str, Any] = 3 * [inputs["""prompt"""]]
        UpperCamelCase :Dict = sd_pipe(**__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
        UpperCamelCase :Optional[int] = 3 * ["""this is a negative prompt"""]
        UpperCamelCase :Union[str, Any] = 3 * [inputs.pop("""prompt""" )]
        (
            (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) , (
                UpperCamelCase
            ) ,
        ) :Union[str, Any] = sd_pipe.encode_prompt(__lowerCamelCase , negative_prompt=__lowerCamelCase )
        UpperCamelCase :Dict = sd_pipe(
            **__lowerCamelCase , prompt_embeds=__lowerCamelCase , negative_prompt_embeds=__lowerCamelCase , pooled_prompt_embeds=__lowerCamelCase , negative_pooled_prompt_embeds=__lowerCamelCase , )
        UpperCamelCase :Union[str, Any] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4


# Slow integration tests that require a GPU and real model weights.
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # Frees GPU memory between tests.
    def _A ( self : Tuple ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Builds deterministic latents + call kwargs for the slow test.
    def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict="cpu" , __lowerCamelCase : List[Any]=torch.floataa , __lowerCamelCase : Tuple=0 ):
        UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
        UpperCamelCase :Dict = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase , dtype=__lowerCamelCase )
        UpperCamelCase :str = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    # Full-size pipeline run compared against a hard-coded output slice.
    def _A ( self : Optional[Any] ):
        UpperCamelCase :Any = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(__lowerCamelCase )
        pipe.set_progress_bar_config(disable=__lowerCamelCase )
        UpperCamelCase :Optional[Any] = self.get_inputs(__lowerCamelCase )
        UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase ).images
        UpperCamelCase :Dict = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        UpperCamelCase :Union[str, Any] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
        assert np.abs(image_slice - expected_slice ).max() < 7E-3
38
1
# NOTE(review): machine-mangled file — all methods are named `_A` (later defs
# shadow earlier ones), call sites keep original names that the definitions
# lost (`MobileViTImageProcessingTester`, `prepare_image_processor_dict`,
# `image_inputs`, `encoded_images`, ...), and the tester __init__ repeats the
# parameter name `__lowerCamelCase` (a SyntaxError). Tokens preserved as found;
# comments only describe the evident intent.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


# Helper that holds the image-processor hyper-parameters used by the tests below.
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def __init__( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=7 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[Any]=18 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Union[str, Any]=400 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[Any]=True , ):
        # Default size / crop_size when the caller passes None.
        UpperCamelCase :int = size if size is not None else {"""shortest_edge""": 20}
        UpperCamelCase :Dict = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        UpperCamelCase :Any = parent
        UpperCamelCase :Optional[int] = batch_size
        UpperCamelCase :Any = num_channels
        UpperCamelCase :Any = image_size
        UpperCamelCase :int = min_resolution
        UpperCamelCase :List[Any] = max_resolution
        UpperCamelCase :Optional[int] = do_resize
        UpperCamelCase :List[str] = size
        UpperCamelCase :List[Any] = do_center_crop
        UpperCamelCase :List[Any] = crop_size
        UpperCamelCase :List[Any] = do_flip_channel_order

    # Returns the kwargs used to construct a MobileViTImageProcessor.
    def _A ( self : Any ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
    # Processor class under test (None when PIL is unavailable).
    snake_case__ : int = MobileViTImageProcessor if is_vision_available() else None

    def _A ( self : Union[str, Any] ):
        UpperCamelCase :List[Any] = MobileViTImageProcessingTester(self )

    @property
    def _A ( self : List[str] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    # Verifies the processor exposes the expected config attributes.
    def _A ( self : int ):
        UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) )
        self.assertTrue(hasattr(__lowerCamelCase , """size""" ) )
        self.assertTrue(hasattr(__lowerCamelCase , """do_center_crop""" ) )
        self.assertTrue(hasattr(__lowerCamelCase , """center_crop""" ) )
        self.assertTrue(hasattr(__lowerCamelCase , """do_flip_channel_order""" ) )

    # from_dict should honour explicit size / crop_size overrides.
    def _A ( self : int ):
        UpperCamelCase :List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        UpperCamelCase :List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )

    def _A ( self : Dict ):
        pass

    # PIL input: single image and batch both produce (N, C, crop_h, crop_w).
    def _A ( self : List[str] ):
        # Initialize image_processing
        UpperCamelCase :Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , Image.Image )
        # Test not batched input
        UpperCamelCase :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase :Dict = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    # numpy input: same shape checks as the PIL variant.
    def _A ( self : List[str] ):
        # Initialize image_processing
        UpperCamelCase :List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , np.ndarray )
        # Test not batched input
        UpperCamelCase :Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase :str = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    # torch input: same shape checks as the PIL variant.
    def _A ( self : Optional[int] ):
        # Initialize image_processing
        UpperCamelCase :str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , torch.Tensor )
        # Test not batched input
        UpperCamelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        UpperCamelCase :str = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
38
from ....configuration_utils import PretrainedConfig
from ....utils import logging


# Fixes over the mangled original: the two module constants collided on the
# name `UpperCAmelCase_` (losing the logger); the three class attributes
# collided on `snake_case__` (losing `model_type` and
# `keys_to_ignore_at_inference`, which the PretrainedConfig machinery needs);
# the __init__ repeated the parameter name `__lowerCamelCase` (a SyntaxError);
# and every attribute assignment targeted a local `UpperCamelCase`, so the
# config stored nothing on `self`. Parameter names are recovered from the
# surviving right-hand-side identifiers of the original assignments.
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class _SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for a TrajectoryTransformer model.

    Stores GPT-style transformer hyper-parameters plus the trajectory-specific
    action/observation/reward dimensions and loss weights.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        # Store hyper-parameters in the same order as the original assignments.
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
38
1
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


# Fixes over the mangled original: all four functions were named
# `SCREAMING_SNAKE_CASE_` (each definition shadowing the previous one) while
# the bodies still called `area_under_curve_estimator` by its real name (a
# NameError), and signatures repeated the parameter name `__magic_name__`
# (a SyntaxError). Names are recovered from the surviving call-site/RHS
# identifiers.
def pi_estimator(iterations: int) -> None:
    """Estimate pi by sampling `iterations` points in the square [-1, 1]^2.

    The fraction of points that land inside the unit circle approximates
    pi/4; results and the error versus math.pi are printed.
    """

    def is_in_circle(x: float, y: float) -> bool:
        # A point is inside the unit circle when its distance from the
        # origin is at most the radius (1).
        distance_from_centre = sqrt((x**2) + (y**2))
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle.
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the circle's area to the square's area is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: mean of the function at uniform samples, scaled
    by the interval length.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y = x, whose exact integral is
    (max^2 - min^2) / 2; prints the estimate, exact value, and error.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under sqrt(4 - x^2) for x in [0, 2] (a quarter
    of a circle of radius 2); prints the estimate and its error.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


# Fixes over the mangled original: the type check was
# `isinstance(number_of_qubits, number_of_qubits)`, which passes the argument
# as its own "type" and therefore raises `TypeError: isinstance() arg 2 must
# be a type` for every call; it is restored to the intended str guard. The
# function name is restored to `quantum_fourier_transform`, which the
# __main__ block below still references.
def quantum_fourier_transform(number_of_qubits: int = 3) -> "qiskit.result.counts.Counts":
    """Build and simulate a Quantum Fourier Transform circuit.

    Args:
        number_of_qubits: number of qubits (1..10).

    Returns:
        Measurement counts from 10,000 shots on the qasm simulator.

    Raises:
        TypeError: if a string is passed instead of a number.
        ValueError: if the value is <= 0, not an exact integer, or > 10.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then controlled-phase
        # rotations of decreasing angle against the lower qubits.
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # Reverse the qubit order to obtain the standard QFT output ordering.
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=1_0000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
    )
38
1
from __future__ import annotations

from typing import Any


# Fixes over the mangled original: the exception class and Node both collided
# on the name `_SCREAMING_SNAKE_CASE` (so `raise ContainsLoopError` /
# `except ContainsLoopError` were NameErrors), and the demo assigned to
# `UpperCAmelCase_` while reading `root_node`. Loop detection now uses a set
# instead of a list: Node relies on default identity hash/eq, so membership
# semantics are unchanged but each check is O(1) instead of O(n).
class ContainsLoopError(Exception):
    """Raised when iterating a linked list that contains a cycle."""


class Node:
    """Singly-linked-list node with cycle-safe iteration."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        """Yield the data of each node; raise ContainsLoopError on a cycle."""
        node: Node | None = self
        seen: set[Node] = set()  # identity-based membership (Node uses default eq/hash)
        while node:
            if node in seen:
                raise ContainsLoopError
            seen.add(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True when following next_node pointers revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
38
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer UpperCAmelCase_ : Optional[Any] = ['''bert-base-uncased''', '''bert-base-cased'''] UpperCAmelCase_ : List[str] = '''hf-internal-testing/tiny-bert-tf-only''' if is_tf_available(): class _SCREAMING_SNAKE_CASE ( tf.keras.Model ): def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] ): super().__init__() UpperCamelCase :Any = tokenizer UpperCamelCase :List[str] = AutoConfig.from_pretrained(__lowerCamelCase ) UpperCamelCase :List[str] = TFAutoModel.from_config(__lowerCamelCase ) def _A ( self : Tuple , __lowerCamelCase : str ): UpperCamelCase :str = self.tokenizer(__lowerCamelCase ) UpperCamelCase :Any = self.bert(**__lowerCamelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def _A ( self : Dict ): super().setUp() UpperCamelCase :int = [ BertTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false UpperCamelCase :Any = [TFBertTokenizer.from_pretrained(__lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__lowerCamelCase , use_fast_bert_tokenizer=__lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCamelCase :Any = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 
齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] UpperCamelCase :Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def _A ( self : Optional[int] ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): UpperCamelCase :Any = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding="""longest""" ) UpperCamelCase :str = tf_tokenizer(__lowerCamelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def _A ( self : Dict ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase :str = tf_tokenizer(self.paired_sentences ) UpperCamelCase :Any = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def _A ( self : List[str] ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase :List[Any] = tf.function(__lowerCamelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): UpperCamelCase :Any = tf.constant(__lowerCamelCase ) UpperCamelCase :List[str] = compiled_tokenizer(__lowerCamelCase ) UpperCamelCase :Optional[Any] = tf_tokenizer(__lowerCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _A ( self : Tuple ): for tf_tokenizer in self.tf_tokenizers: UpperCamelCase :List[str] = ModelToSave(tokenizer=__lowerCamelCase ) UpperCamelCase :Union[str, Any] = tf.convert_to_tensor(self.test_sentences ) UpperCamelCase :Union[str, Any] = model(__lowerCamelCase ) # Build 
model with some sample inputs with TemporaryDirectory() as tempdir: UpperCamelCase :List[str] = Path(__lowerCamelCase ) / """saved.model""" model.save(__lowerCamelCase ) UpperCamelCase :List[Any] = tf.keras.models.load_model(__lowerCamelCase ) UpperCamelCase :Dict = loaded_model(__lowerCamelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
38
1
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : str ) -> int: """simple docstring""" with open(__magic_name__ ) as metadata_file: UpperCamelCase :int = json.load(__magic_name__ ) UpperCamelCase :List[Any] = LukeConfig(use_entity_aware_attention=__magic_name__ , **metadata["""model_config"""] ) # Load in the weights from the checkpoint_path UpperCamelCase :List[str] = torch.load(__magic_name__ , map_location="""cpu""" )["""module"""] # Load the entity vocab file UpperCamelCase :Optional[int] = load_original_entity_vocab(__magic_name__ ) # add an entry for [MASK2] UpperCamelCase :Optional[Any] = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 UpperCamelCase :Any = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] ) # Add special tokens to the token vocabulary for downstream tasks UpperCamelCase :Union[str, Any] = AddedToken("""<ent>""" , lstrip=__magic_name__ , rstrip=__magic_name__ ) UpperCamelCase :Optional[int] = AddedToken("""<ent2>""" , lstrip=__magic_name__ , rstrip=__magic_name__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(__magic_name__ ) with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """r""" ) as f: UpperCamelCase :str = json.load(__magic_name__ ) UpperCamelCase :Optional[int] = """MLukeTokenizer""" with open(os.path.join(__magic_name__ , """tokenizer_config.json""" ) , """w""" ) as f: json.dump(__magic_name__ , __magic_name__ ) with 
open(os.path.join(__magic_name__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f: json.dump(__magic_name__ , __magic_name__ ) UpperCamelCase :Optional[int] = MLukeTokenizer.from_pretrained(__magic_name__ ) # Initialize the embeddings of the special tokens UpperCamelCase :str = tokenizer.convert_tokens_to_ids(["""@"""] )[0] UpperCamelCase :Optional[int] = tokenizer.convert_tokens_to_ids(["""#"""] )[0] UpperCamelCase :str = state_dict["""embeddings.word_embeddings.weight"""] UpperCamelCase :int = word_emb[ent_init_index].unsqueeze(0 ) UpperCamelCase :Dict = word_emb[enta_init_index].unsqueeze(0 ) UpperCamelCase :Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: UpperCamelCase :Dict = state_dict[bias_name] UpperCamelCase :str = decoder_bias[ent_init_index].unsqueeze(0 ) UpperCamelCase :Dict = decoder_bias[enta_init_index].unsqueeze(0 ) UpperCamelCase :Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: UpperCamelCase :str = f"""encoder.layer.{layer_index}.attention.self.""" UpperCamelCase :str = state_dict[prefix + matrix_name] UpperCamelCase :Any = state_dict[prefix + matrix_name] UpperCamelCase :List[Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks UpperCamelCase :int = state_dict["""entity_embeddings.entity_embeddings.weight"""] UpperCamelCase :Dict = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 ) UpperCamelCase :int = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' UpperCamelCase :Union[str, Any] = state_dict["""entity_predictions.bias"""] UpperCamelCase :List[Any] = 
entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 ) UpperCamelCase :Dict = torch.cat([entity_prediction_bias, entity_mask_bias] ) UpperCamelCase :Union[str, Any] = LukeForMaskedLM(config=__magic_name__ ).eval() state_dict.pop("""entity_predictions.decoder.weight""" ) state_dict.pop("""lm_head.decoder.weight""" ) state_dict.pop("""lm_head.decoder.bias""" ) UpperCamelCase :int = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )): UpperCamelCase :Union[str, Any] = state_dict[key] else: UpperCamelCase :Optional[Any] = state_dict[key] UpperCamelCase , UpperCamelCase :List[Any] = model.load_state_dict(__magic_name__ , strict=__magic_name__ ) if set(__magic_name__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(__magic_name__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs UpperCamelCase :Dict = MLukeTokenizer.from_pretrained(__magic_name__ , task="""entity_classification""" ) UpperCamelCase :Dict = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).""" UpperCamelCase :Union[str, Any] = (0, 9) UpperCamelCase :Optional[Any] = tokenizer(__magic_name__ , entity_spans=[span] , return_tensors="""pt""" ) UpperCamelCase :Union[str, Any] = model(**__magic_name__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base UpperCamelCase :Union[str, Any] = torch.Size((1, 33, 768) ) UpperCamelCase :Tuple = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 
0.0644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base UpperCamelCase :Union[str, Any] = torch.Size((1, 1, 768) ) UpperCamelCase :Tuple = torch.tensor([[-0.1482, 0.0609, 0.0322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction UpperCamelCase :Dict = MLukeTokenizer.from_pretrained(__magic_name__ ) UpperCamelCase :Optional[int] = """Tokyo is the capital of <mask>.""" UpperCamelCase :Optional[int] = (24, 30) UpperCamelCase :Optional[Any] = tokenizer(__magic_name__ , entity_spans=[span] , return_tensors="""pt""" ) UpperCamelCase :List[str] = model(**__magic_name__ ) UpperCamelCase :Optional[Any] = encoding["""input_ids"""][0].tolist() UpperCamelCase :Any = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) ) UpperCamelCase :str = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(__magic_name__ ) UpperCamelCase :Tuple = outputs.entity_logits[0][0].argmax().item() UpperCamelCase :str = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("""Saving PyTorch model to {}""".format(__magic_name__ ) ) model.save_pretrained(__magic_name__ ) def SCREAMING_SNAKE_CASE_ 
( __magic_name__ : Optional[Any] ) -> List[str]: """simple docstring""" UpperCamelCase :Dict = ["""[MASK]""", """[PAD]""", """[UNK]"""] UpperCamelCase :Dict = [json.loads(__magic_name__ ) for line in open(__magic_name__ )] UpperCamelCase :int = {} for entry in data: UpperCamelCase :Dict = entry["""id"""] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: UpperCamelCase :int = entity_id break UpperCamelCase :Dict = f"""{language}:{entity_name}""" UpperCamelCase :Optional[int] = entity_id return new_mapping if __name__ == "__main__": UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) UpperCAmelCase_ : int = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
38
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter UpperCAmelCase_ : Any = '''Create a default config file for Accelerate with only a few flags set.''' def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int]="no" , __magic_name__ : str = default_json_config_file , __magic_name__ : bool = False ) -> str: """simple docstring""" UpperCamelCase :Any = Path(__magic_name__ ) path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ ) if path.exists(): print( f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False UpperCamelCase :Dict = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. 
Received {mixed_precision}""" ) UpperCamelCase :Optional[Any] = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): UpperCamelCase :Union[str, Any] = torch.cuda.device_count() UpperCamelCase :List[Any] = num_gpus UpperCamelCase :Dict = False if num_gpus > 1: UpperCamelCase :Any = """MULTI_GPU""" else: UpperCamelCase :Any = """NO""" elif is_xpu_available() and use_xpu: UpperCamelCase :Optional[Any] = torch.xpu.device_count() UpperCamelCase :Optional[int] = num_xpus UpperCamelCase :int = False if num_xpus > 1: UpperCamelCase :Union[str, Any] = """MULTI_XPU""" else: UpperCamelCase :Union[str, Any] = """NO""" elif is_npu_available(): UpperCamelCase :List[Any] = torch.npu.device_count() UpperCamelCase :Optional[Any] = num_npus UpperCamelCase :Tuple = False if num_npus > 1: UpperCamelCase :Optional[Any] = """MULTI_NPU""" else: UpperCamelCase :List[Any] = """NO""" else: UpperCamelCase :Any = 0 UpperCamelCase :Optional[Any] = True UpperCamelCase :Optional[Any] = 1 UpperCamelCase :List[str] = """NO""" UpperCamelCase :int = ClusterConfig(**__magic_name__ ) config.to_json_file(__magic_name__ ) return path def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" UpperCamelCase :Dict = parser.add_parser("""default""" , parents=__magic_name__ , help=__magic_name__ , formatter_class=__magic_name__ ) parser.add_argument( """--config_file""" , default=__magic_name__ , help=( """The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=__magic_name__ , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. """ """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=__magic_name__ ) return parser def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" UpperCamelCase :Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"""accelerate configuration saved at {config_file}""" )
38
1
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class _SCREAMING_SNAKE_CASE ( enum.Enum ): snake_case__ : List[Any] = 0 snake_case__ : Any = 1 snake_case__ : Optional[Any] = 2 @add_end_docstrings(_a ) class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Union[str, Any] = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ def __init__( self : Tuple , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : int ): super().__init__(*__lowerCamelCase , **__lowerCamelCase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. 
UpperCamelCase :Union[str, Any] = None if self.model.config.prefix is not None: UpperCamelCase :List[Any] = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. UpperCamelCase :List[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. UpperCamelCase , UpperCamelCase , UpperCamelCase :Any = self._sanitize_parameters(prefix=__lowerCamelCase , **self._forward_params ) UpperCamelCase :List[Any] = {**self._preprocess_params, **preprocess_params} UpperCamelCase :Union[str, Any] = {**self._forward_params, **forward_params} def _A ( self : int , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Dict=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : str , ): UpperCamelCase :Any = {} if prefix is not None: UpperCamelCase :int = prefix if prefix: UpperCamelCase :List[Any] = self.tokenizer( __lowerCamelCase , padding=__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=self.framework ) UpperCamelCase :Dict = prefix_inputs["""input_ids"""].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected""" """ [None, 'hole']""" ) UpperCamelCase :List[Any] = handle_long_generation preprocess_params.update(__lowerCamelCase ) UpperCamelCase :Optional[Any] = generate_kwargs UpperCamelCase :Any = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with 
`return_full_text`""" ) if return_tensors is not None: raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" ) UpperCamelCase :Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" ) UpperCamelCase :Dict = ReturnType.TENSORS if return_type is not None: UpperCamelCase :List[Any] = return_type if clean_up_tokenization_spaces is not None: UpperCamelCase :int = clean_up_tokenization_spaces if stop_sequence is not None: UpperCamelCase :List[str] = self.tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) if len(__lowerCamelCase ) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""" ) UpperCamelCase :Optional[int] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _A ( self : Dict , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"""add_space_before_punct_symbol""": True} ) return super()._parse_and_tokenize(*__lowerCamelCase , **__lowerCamelCase ) def __call__( self : List[str] , __lowerCamelCase : Optional[int] , **__lowerCamelCase : str ): return super().__call__(__lowerCamelCase , **__lowerCamelCase ) def _A ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict="" , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Union[str, Any] ): UpperCamelCase :Dict = self.tokenizer( prefix + prompt_text , padding=__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=self.framework ) UpperCamelCase :List[str] = prompt_text if handle_long_generation == "hole": UpperCamelCase :str = 
inputs["""input_ids"""].shape[-1] if "max_new_tokens" in generate_kwargs: UpperCamelCase :List[Any] = generate_kwargs["""max_new_tokens"""] else: UpperCamelCase :Tuple = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError("""We cannot infer how many new tokens are expected""" ) if cur_len + new_tokens > self.tokenizer.model_max_length: UpperCamelCase :Optional[int] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( """We cannot use `hole` to handle this generation the number of desired tokens exceeds the""" """ models max length""" ) UpperCamelCase :int = inputs["""input_ids"""][:, -keep_length:] if "attention_mask" in inputs: UpperCamelCase :Any = inputs["""attention_mask"""][:, -keep_length:] return inputs def _A ( self : str , __lowerCamelCase : int , **__lowerCamelCase : str ): UpperCamelCase :str = model_inputs["""input_ids"""] UpperCamelCase :Optional[int] = model_inputs.get("""attention_mask""" , __lowerCamelCase ) # Allow empty prompts if input_ids.shape[1] == 0: UpperCamelCase :int = None UpperCamelCase :Union[str, Any] = None UpperCamelCase :List[Any] = 1 else: UpperCamelCase :List[Any] = input_ids.shape[0] UpperCamelCase :Any = model_inputs.pop("""prompt_text""" ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
UpperCamelCase :List[str] = generate_kwargs.pop("""prefix_length""" , 0 ) if prefix_length > 0: UpperCamelCase :List[str] = """max_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].max_new_tokens is not None ) if not has_max_new_tokens: UpperCamelCase :Optional[int] = generate_kwargs.get("""max_length""" ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length UpperCamelCase :List[str] = """min_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL UpperCamelCase :Union[str, Any] = self.model.generate(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , **__lowerCamelCase ) UpperCamelCase :Optional[Any] = generated_sequence.shape[0] if self.framework == "pt": UpperCamelCase :List[Any] = generated_sequence.reshape(__lowerCamelCase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": UpperCamelCase :Optional[int] = tf.reshape(__lowerCamelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def _A ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any]=ReturnType.FULL_TEXT , __lowerCamelCase : Optional[Any]=True ): UpperCamelCase :str = model_outputs["""generated_sequence"""][0] UpperCamelCase :Optional[int] = model_outputs["""input_ids"""] UpperCamelCase :int = model_outputs["""prompt_text"""] UpperCamelCase :Optional[Any] = generated_sequence.numpy().tolist() UpperCamelCase :Optional[int] = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: UpperCamelCase :Dict = {"""generated_token_ids""": sequence} elif return_type in {ReturnType.NEW_TEXT, 
ReturnType.FULL_TEXT}: # Decode text UpperCamelCase :Optional[Any] = self.tokenizer.decode( __lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: UpperCamelCase :Dict = 0 else: UpperCamelCase :Optional[Any] = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase , ) ) if return_type == ReturnType.FULL_TEXT: UpperCamelCase :Tuple = prompt_text + text[prompt_length:] else: UpperCamelCase :Tuple = text[prompt_length:] UpperCamelCase :Dict = {"""generated_text""": all_text} records.append(__lowerCamelCase ) return records
38
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ : str = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Tuple = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Any = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
38
1
import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor UpperCAmelCase_ : Any = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : Union[str, Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Any ): warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
38
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Shap-E image-to-image pipeline with tiny dummy models.

    The original obfuscated file gave both test classes the same name and every
    method the name `_A` (so later definitions shadowed earlier ones) and used
    duplicate parameter names (a SyntaxError); unique, descriptive names are
    restored here so all tests parse and are discovered.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        # Tiny CLIP vision tower; seeded for determinism.
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        return CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        """Assemble the full set of tiny components the pipeline needs."""
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        return {
            "prior": self.dummy_prior,
            "image_encoder": self.dummy_image_encoder,
            "image_processor": self.dummy_image_processor,
            "renderer": self.dummy_renderer,
            "scheduler": scheduler,
        }

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }

    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released openai/shap-e-img2img weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
38
1
import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def count_token_occurrences(data, vocab_size):
    """Return a list of length `vocab_size` with the occurrence count of every token id.

    Args:
        data: iterable of token-id sequences (one per training example).
        vocab_size: size of the vocabulary; token ids must be < vocab_size.

    Returns:
        list[int] where entry k is the number of times token id k appears in `data`.
    """
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * vocab_size
    for k, v in counter.items():
        # The original (obfuscated) version rebound a throwaway variable here
        # instead of storing into counts[k], and then dumped an undefined name.
        counts[k] = v
    return counts


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counts = count_token_occurrences(data, args.vocab_size)

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
38
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to the labels (expects numpy arrays)."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Return accuracy and F1.

    The original (obfuscated) version named all three parameters
    `__magic_name__` (a SyntaxError) while the caller passed `fa_avg="macro"`;
    the averaging mode is now an honest keyword parameter.
    """
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute MultiRC metrics: per-question exact match and macro-F1, plus overall F1."""
    question_map = {}
    # Group (prediction, label) pairs by paragraph/question id.
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # Exact match: every answer within the question must be correct.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    """SuperGLUE benchmark metric, dispatching on `self.config_name`."""

    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
38
1
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the SDXL image-to-image pipeline with tiny dummy components.

    The original obfuscated file collapsed every method to the name `_A` (so
    only the last definition survived) and used duplicate parameter names
    (a SyntaxError); descriptive unique names are restored here.
    """

    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build the tiny unet/vae/text-encoder stack the pipeline needs."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map from [-1, 1] to [0, 1]
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against stabilityai/stable-diffusion-2-base."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        # NOTE(review): the obfuscated source shows `torch.floataa`; float32 is
        # assumed here from the upstream test — confirm against the original.
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        return {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }

    def test_stable_diffusion_2_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
38
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the configuration used to exercise the image processor.

    Restores the class name the test class below instantiates
    (`EfficientFormerImageProcessorTester`); the obfuscated source named it
    `_SCREAMING_SNAKE_CASE`, making that instantiation a NameError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Keep the original defaults; mutable list defaults are replaced with
        # None sentinels to avoid shared-state pitfalls.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Image-processor tests; unique test names restored (the obfuscated source
    named every method `_A`, so later definitions silently shadowed earlier ones)."""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
38
1
"""Auto classes for Flax models.

Fix: every mapping constant in the obfuscated source was assigned to the
placeholder name ``UpperCAmelCase_`` while later code referenced the real
names (``FLAX_MODEL_MAPPING_NAMES`` in ``_LazyAutoMapping``,
``FlaxAutoModel`` in ``auto_class_update``), leaving the whole module full
of undefined names; the auto classes also assigned their mapping to
``snake_case__`` instead of the ``_model_mapping`` attribute that
``_BaseAutoModelClass`` reads. All names are restored from their call sites.
"""
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy config->class mappings built from the name tables above.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
38
"""Pure-Python MD5 implementation.

Fix: every function in the obfuscated source was defined under the single
name ``SCREAMING_SNAKE_CASE_`` while the call sites referenced
``to_little_endian``, ``reformat_hex``, ``preprocess``, ``get_block_words``,
``not_aa``, ``sum_aa`` and ``left_rotate_aa`` — all undefined.  The names
are restored from those call sites; the algorithm itself is unchanged and
verified against RFC 1321 test digests.
"""
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-char bit-string (one MD5 word) into little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    # Emit the four 8-bit groups in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Render the low 32 bits of `i` as 8 hex chars in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    # Emit the four hex-digit pairs in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad `message` (as an ASCII bit-string) to a multiple of 512 bits.

    Appends the mandatory "1" bit, zero-fill to 448 mod 512, then the
    64-bit original length with its two 32-bit halves each little-endian.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block of `bit_string` as a list of 16 ints (32 bits each)."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_aa(i: int) -> int:
    """Bitwise NOT of `i` within 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    """Add two numbers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value `i` left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as 32 lowercase hex bytes.

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    """
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
38
1
UpperCAmelCase_ : dict[tuple[int, int, int], int] = {} def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on UpperCamelCase :List[str] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one UpperCamelCase :List[str] = _calculate(days - 1 , __magic_name__ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 UpperCamelCase :Union[str, Any] = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter UpperCamelCase :List[str] = _calculate(days - 1 , __magic_name__ , 0 ) UpperCamelCase :Union[str, Any] = state_late + state_absent + state_ontime UpperCamelCase :Optional[int] = prizestrings return prizestrings def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int = 30 ) -> int: """simple docstring""" return _calculate(__magic_name__ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
38
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : List[Any] , __lowerCamelCase : Callable , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[dict] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : List[Any] , ): super().__init__( features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , ) UpperCamelCase :Union[str, Any] = Generator( cache_dir=__lowerCamelCase , features=__lowerCamelCase , generator=__lowerCamelCase , gen_kwargs=__lowerCamelCase , **__lowerCamelCase , ) def _A ( self : List[str] ): # Build iterable dataset if self.streaming: UpperCamelCase :Any = self.builder.as_streaming_dataset(split="""train""" ) # Build regular (map-style) dataset else: UpperCamelCase :Tuple = None UpperCamelCase :Dict = None UpperCamelCase :Dict = None UpperCamelCase :List[str] = None self.builder.download_and_prepare( download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , ) UpperCamelCase :Tuple = self.builder.as_dataset( split="""train""" , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory ) return dataset
38
1
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] ) -> Union[str, Any]: """simple docstring""" UpperCamelCase :Union[str, Any] = [0] * len(__magic_name__ ) UpperCamelCase :int = [] UpperCamelCase :str = [] UpperCamelCase :str = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(__magic_name__ ) ): if indegree[i] == 0: queue.append(__magic_name__ ) while queue: UpperCamelCase :str = queue.pop(0 ) cnt += 1 topo.append(__magic_name__ ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(__magic_name__ ) if cnt != len(__magic_name__ ): print("""Cycle exists""" ) else: print(__magic_name__ ) # Adjacency List of Graph UpperCAmelCase_ : str = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
38
"""Accelerate training script (GLUE/MRPC with optional DeepSpeed).

Fix: the obfuscated source assigned every name to placeholders
(`UpperCamelCase`, `UpperCAmelCase_`) and defined all three functions as
`SCREAMING_SNAKE_CASE_` while the call sites use `get_dataloaders`,
`training_function` and `main` — e.g. `datasets.map(...)` referenced a
dataset variable that was never bound. Names restored from those call sites.
"""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build MRPC train/eval dataloaders tokenized for `model_name`.

    Returns (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train and evaluate, tracking the best MRPC accuracy across epochs.

    Writes per-epoch accuracies to `<output_dir>/all_results.json` on the
    main process, and asserts the best accuracy reaches
    `args.performance_lower_bound` when that is set.
    """
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: DeepSpeed may own the optimizer via its config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: likewise, DeepSpeed may own the scheduler.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    """Parse CLI arguments and run `training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
38
1
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> np.ndarray: """simple docstring""" if (ksize % 2) == 0: UpperCamelCase :Optional[Any] = ksize + 1 UpperCamelCase :Optional[int] = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(__magic_name__ ): for x in range(__magic_name__ ): # distance from center UpperCamelCase :Optional[Any] = x - ksize // 2 UpperCamelCase :Optional[Any] = y - ksize // 2 # degree to radiant UpperCamelCase :str = theta / 180 * np.pi UpperCamelCase :int = np.cos(_theta ) UpperCamelCase :List[Any] = np.sin(_theta ) # get kernel x UpperCamelCase :Any = cos_theta * px + sin_theta * py # get kernel y UpperCamelCase :Dict = -sin_theta * px + cos_theta * py # fill kernel UpperCamelCase :Optional[Any] = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image UpperCAmelCase_ : List[str] = imread('''../image_data/lena.jpg''') # turn image in gray scale value UpperCAmelCase_ : str = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges UpperCAmelCase_ : List[Any] = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: UpperCAmelCase_ : Union[str, Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) UpperCAmelCase_ : Any = out / out.max() * 2_55 UpperCAmelCase_ : Dict = out.astype(np.uinta) imshow('''Original''', gray) imshow('''Gabor filter with 20x20 mask and 6 directions''', out) waitKey(0)
38
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the word-level TransfoXL tokenizer (Moses-style punctuation
    and number splitting)."""

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # minimal vocab written to the temp dir used by the mixin
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        # all mixin-driven tests run with lower-casing enabled
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_num(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        # detokenization must round-trip the @-@ / @,@ / @.@ markers
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
38
1
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    """Thin `datasets.Metric` wrapper around sklearn's `mean_squared_error`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # the "multilist" config accepts one sequence of floats per example
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        # sklearn's signature is (y_true, y_pred): references come first
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
38
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./   # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq key (or fragment) -> HF WavLMModel attribute path; "*" is the layer index
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the (possibly nested) attribute `key` of `hf_pointer`,
    asserting that shapes match. `weight_type` selects weight/weight_g/weight_v/bias."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Map every tensor of the fairseq WavLM state dict onto the HF model,
    logging any weights that could not be matched."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # recover the encoder layer index from the fairseq key
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm tensor; type_id 0 is the conv,
    type_id 2 is the layer norm (group norm only applies to the first layer)."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load a fairseq WavLM checkpoint, port its weights into an HF WavLMModel
    and save the result to `pytorch_dump_folder_path`."""
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
38
1
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize one line, padded/truncated to `max_length` on `padding_side`."""
    # BART needs add_prefix_space unless the line already starts with a space
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    """Line-aligned `<type_path>.source` / `<type_path>.target` dataset for
    seq2seq fine-tuning; handles RAG's dual tokenizer and T5's manual EOS."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        # character length of each line, used for the empty-line sanity check
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """Token-level F1 between normalized prediction and reference."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    """Move any `extra_params` that exist on `hparams` onto `config`,
    translating names the target config spells differently."""
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
38
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends


if is_bsa_available():
    import bsa
    from bsa import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extracts text nodes and their XPaths from raw HTML using BeautifulSoup.

    Takes one HTML string (or a batch of them) and returns a `BatchFeature`
    with "nodes" (text content per node) and "xpaths" (one XPath per node).
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Walk up from `element` and collect the tag name and sibling index
        at every level; returns (tags, subscripts) from root to element."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                # subscript 0 means "only child of this tag name" (no [n] emitted)
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML document and return, per text node:
        the string itself, its xpath tag list, and its xpath subscript list."""
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join tag/subscript pairs into an XPath like `/html/body/div[2]`."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
38
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# processor is always importable; tokenizers are added below when their
# optional backends are available
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # replace the module with a lazy proxy so submodules import on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
38
def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    """Return True if `next_ver` can extend the partial Hamiltonian path.

    1. the previously placed vertex must be adjacent to `next_ver`
    2. `next_ver` must not already appear in `path`
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to fill `path[curr_ind:]`; returns True on success.

    `path` is mutated in place; failed placements are reset to -1.
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle in `graph` (adjacency matrix) starting and
    ending at `start_index`, or an empty list if none exists."""
    # Initialize path with -1, indicating that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
38
1
from __future__ import annotations from collections.abc import MutableSequence class _SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : MutableSequence[float] ): if len(__lowerCamelCase ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) UpperCamelCase :list[float] = list(__lowerCamelCase ) UpperCamelCase :Dict = degree def __add__( self : str , __lowerCamelCase : Polynomial ): if self.degree > polynomial_a.degree: UpperCamelCase :List[str] = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , __lowerCamelCase ) else: UpperCamelCase :Dict = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , __lowerCamelCase ) def __sub__( self : int , __lowerCamelCase : Polynomial ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : Any ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : List[str] , __lowerCamelCase : Polynomial ): UpperCamelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , __lowerCamelCase ) def _A ( self : Tuple , __lowerCamelCase : int | float ): UpperCamelCase :int | float = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : List[Any] ): UpperCamelCase :Any = """""" for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += 
str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__lowerCamelCase ) return polynomial def __repr__( self : Optional[Any] ): return self.__str__() def _A ( self : Optional[int] ): UpperCamelCase :list[float] = [0] * self.degree for i in range(self.degree ): UpperCamelCase :List[Any] = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , __lowerCamelCase ) def _A ( self : Optional[int] , __lowerCamelCase : int | float = 0 ): UpperCamelCase :list[float] = [0] * (self.degree + 2) UpperCamelCase :List[Any] = constant for i in range(self.degree + 1 ): UpperCamelCase :List[str] = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , __lowerCamelCase ) def __eq__( self : Any , __lowerCamelCase : object ): if not isinstance(__lowerCamelCase , __lowerCamelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : List[Any] , __lowerCamelCase : object ): return not self.__eq__(__lowerCamelCase )
38
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE ( _a ): def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : str=7 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : str=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Union[str, Any]=99 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Tuple=32 , __lowerCamelCase : Any=5 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : int=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[int]="last" , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None , ): UpperCamelCase :int = parent UpperCamelCase :Optional[int] = batch_size UpperCamelCase :str = seq_length UpperCamelCase :Optional[int] = is_training UpperCamelCase :Optional[int] = 
use_input_lengths UpperCamelCase :Union[str, Any] = use_token_type_ids UpperCamelCase :List[str] = use_labels UpperCamelCase :Dict = gelu_activation UpperCamelCase :Optional[int] = sinusoidal_embeddings UpperCamelCase :List[Any] = causal UpperCamelCase :Optional[int] = asm UpperCamelCase :List[str] = n_langs UpperCamelCase :int = vocab_size UpperCamelCase :List[Any] = n_special UpperCamelCase :List[Any] = hidden_size UpperCamelCase :List[str] = num_hidden_layers UpperCamelCase :List[Any] = num_attention_heads UpperCamelCase :Tuple = hidden_dropout_prob UpperCamelCase :List[str] = attention_probs_dropout_prob UpperCamelCase :Tuple = max_position_embeddings UpperCamelCase :List[str] = type_vocab_size UpperCamelCase :Union[str, Any] = type_sequence_label_size UpperCamelCase :int = initializer_range UpperCamelCase :List[str] = num_labels UpperCamelCase :Optional[int] = num_choices UpperCamelCase :Optional[Any] = summary_type UpperCamelCase :Tuple = use_proj UpperCamelCase :Optional[Any] = scope def _A ( self : List[str] ): UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase :Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase :List[Any] = None if self.use_input_lengths: UpperCamelCase :Dict = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCamelCase :str = None if self.use_token_type_ids: UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCamelCase :Optional[int] = None UpperCamelCase :int = None UpperCamelCase :List[Any] = None if self.use_labels: UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase :List[str] = ids_tensor([self.batch_size] , 2 ).float() UpperCamelCase :List[str] = ids_tensor([self.batch_size] , self.num_choices ) 
UpperCamelCase :Union[str, Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _A ( self : List[Any] ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _A ( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : int , ): UpperCamelCase :Tuple = FlaubertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :int = model(__lowerCamelCase , lengths=__lowerCamelCase , langs=__lowerCamelCase ) UpperCamelCase :List[Any] = model(__lowerCamelCase , langs=__lowerCamelCase ) UpperCamelCase :int = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , ): UpperCamelCase :Any = FlaubertWithLMHeadModel(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Dict = model(__lowerCamelCase , 
token_type_ids=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Any = FlaubertForQuestionAnsweringSimple(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Any = model(__lowerCamelCase ) UpperCamelCase :int = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _A ( self : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : str , ): UpperCamelCase :str = FlaubertForQuestionAnswering(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Any = model(__lowerCamelCase ) UpperCamelCase :Optional[int] = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , p_mask=__lowerCamelCase , ) UpperCamelCase :Union[str, Any] = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , ) ((UpperCamelCase) , ) :int = result_with_labels.to_tuple() UpperCamelCase :int = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) ((UpperCamelCase) , ) 
:List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _A ( self : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , ): UpperCamelCase :Optional[int] = FlaubertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Tuple = model(__lowerCamelCase ) UpperCamelCase :List[str] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A ( self : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Dict = self.num_labels UpperCamelCase :Tuple = FlaubertForTokenClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Optional[Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _A ( self : 
Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , ): UpperCamelCase :Union[str, Any] = self.num_choices UpperCamelCase :List[Any] = FlaubertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() UpperCamelCase :Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase :Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _A ( self : str ): UpperCamelCase :List[str] = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) :List[Any] = config_and_inputs UpperCamelCase :Union[str, Any] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): snake_case__ : Optional[int] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : Tuple = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": 
FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _A ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _A ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ): UpperCamelCase :Tuple = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": UpperCamelCase :Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) UpperCamelCase :List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase ) return inputs_dict def _A ( self : str ): UpperCamelCase :List[Any] = FlaubertModelTester(self ) UpperCamelCase :Any = ConfigTester(self , config_class=__lowerCamelCase , emb_dim=37 ) def _A ( self : Optional[int] ): self.config_tester.run_common_tests() def _A ( self : List[Any] ): UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__lowerCamelCase ) def _A ( self : Optional[int] ): UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__lowerCamelCase ) def _A ( 
self : List[Any] ): UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__lowerCamelCase ) def _A ( self : Union[str, Any] ): UpperCamelCase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__lowerCamelCase ) def _A ( self : Optional[Any] ): UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__lowerCamelCase ) def _A ( self : Tuple ): UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__lowerCamelCase ) def _A ( self : int ): UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__lowerCamelCase ) @slow def _A ( self : Any ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase :Optional[int] = FlaubertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @slow @require_torch_gpu def _A ( self : Tuple ): UpperCamelCase , UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return UpperCamelCase :Optional[Any] = True UpperCamelCase :Optional[Any] = model_class(config=__lowerCamelCase ) UpperCamelCase :str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) UpperCamelCase :str = torch.jit.trace( __lowerCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__lowerCamelCase , os.path.join(__lowerCamelCase , """traced_model.pt""" ) ) UpperCamelCase :int = torch.jit.load(os.path.join(__lowerCamelCase , """traced_model.pt""" ) , map_location=__lowerCamelCase ) loaded(inputs_dict["""input_ids"""].to(__lowerCamelCase ) , inputs_dict["""attention_mask"""].to(__lowerCamelCase ) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def _A ( self : Optional[Any] ): UpperCamelCase :Union[str, Any] = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) UpperCamelCase :Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): UpperCamelCase :Tuple = model(__lowerCamelCase )[0] UpperCamelCase :Union[str, Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) UpperCamelCase :int = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
38
1
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str , __magic_name__ : Optional[int]=False ) -> Tuple: """simple docstring""" try: UpperCamelCase :str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCamelCase :str = default else: # KEY is set, convert it to True or False. try: UpperCamelCase :Union[str, Any] = strtobool(__magic_name__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value UpperCAmelCase_ : Any = parse_flag_from_env('''RUN_SLOW''', default=False) UpperCAmelCase_ : Optional[Any] = parse_flag_from_env('''RUN_REMOTE''', default=False) UpperCAmelCase_ : Dict = parse_flag_from_env('''RUN_LOCAL''', default=True) UpperCAmelCase_ : str = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression UpperCAmelCase_ : str = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') UpperCAmelCase_ : Optional[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') UpperCAmelCase_ : str = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio UpperCAmelCase_ : str = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install 
\"soundfile>=0.12.1\"\'; ''', ) # Beam UpperCAmelCase_ : Any = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility UpperCAmelCase_ : Optional[int] = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows UpperCAmelCase_ : Optional[Any] = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict ) -> Any: """simple docstring""" try: import faiss # noqa except ImportError: UpperCamelCase :List[str] = unittest.skip("""test requires faiss""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple ) -> int: """simple docstring""" try: import regex # noqa except ImportError: UpperCamelCase :Dict = unittest.skip("""test requires regex""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any ) -> Dict: """simple docstring""" try: import elasticsearch # noqa except ImportError: UpperCamelCase :int = unittest.skip("""test requires elasticsearch""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> Dict: """simple docstring""" try: import sqlalchemy # noqa except ImportError: UpperCamelCase :Tuple = unittest.skip("""test requires sqlalchemy""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any ) -> List[Any]: """simple docstring""" if not config.TORCH_AVAILABLE: UpperCamelCase :int = unittest.skip("""test requires PyTorch""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" if not config.TF_AVAILABLE: UpperCamelCase :str = unittest.skip("""test requires TensorFlow""" )(__magic_name__ ) return test_case def 
SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] ) -> Optional[Any]: """simple docstring""" if not config.JAX_AVAILABLE: UpperCamelCase :Optional[int] = unittest.skip("""test requires JAX""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not config.PIL_AVAILABLE: UpperCamelCase :int = unittest.skip("""test requires Pillow""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int] ) -> Dict: """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip("""test requires transformers""" )(__magic_name__ ) else: return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> int: """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip("""test requires tiktoken""" )(__magic_name__ ) else: return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] ) -> List[str]: """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip("""test requires spacy""" )(__magic_name__ ) else: return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> Tuple: """simple docstring""" def _require_spacy_model(__magic_name__ : List[str] ): try: import spacy # noqa F401 spacy.load(__magic_name__ ) except ImportError: return unittest.skip("""test requires spacy""" )(__magic_name__ ) except OSError: return unittest.skip("""test requires spacy model '{}'""".format(__magic_name__ ) )(__magic_name__ ) else: return test_case return _require_spacy_model def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple ) -> List[Any]: """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip("""test requires pyspark""" )(__magic_name__ ) else: return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple ) -> Optional[int]: """simple docstring""" try: import joblibspark # noqa F401 except 
ImportError: return unittest.skip("""test requires joblibspark""" )(__magic_name__ ) else: return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> List[str]: """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: UpperCamelCase :Optional[int] = unittest.skip("""test is slow""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" if not _run_local_tests or _run_local_tests == 0: UpperCamelCase :str = unittest.skip("""test is local""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any ) -> int: """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: UpperCamelCase :Optional[int] = unittest.skip("""test is packaged""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: UpperCamelCase :Optional[int] = unittest.skip("""test requires remote""" )(__magic_name__ ) return test_case def SCREAMING_SNAKE_CASE_ ( *__magic_name__ : int ) -> List[str]: """simple docstring""" def decorate(cls : int ): for name, fn in cls.__dict__.items(): if callable(__magic_name__ ) and name.startswith("""test""" ): for decorator in decorators: UpperCamelCase :int = decorator(__magic_name__ ) setattr(cls , __magic_name__ , __magic_name__ ) return cls return decorate class _SCREAMING_SNAKE_CASE ( _a ): pass class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Union[str, Any] = 0 snake_case__ : Union[str, Any] = 1 snake_case__ : List[str] = 2 @contextmanager def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict=OfflineSimulationMode.CONNECTION_FAILS , __magic_name__ : str=1E-16 ) -> Any: """simple docstring""" UpperCamelCase :Optional[int] = requests.Session().request def timeout_request(__magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : Optional[int] , **__magic_name__ : Dict ): # 
Change the url to an invalid url so that the connection hangs UpperCamelCase :Dict = """https://10.255.255.1""" if kwargs.get("""timeout""" ) is None: raise RequestWouldHangIndefinitelyError( f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" ) UpperCamelCase :List[str] = timeout try: return online_request(__magic_name__ , __magic_name__ , **__magic_name__ ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier UpperCamelCase :Any = url UpperCamelCase :Any = e.args[0] UpperCamelCase :Tuple = (max_retry_error.args[0].replace("""10.255.255.1""" , f"""OfflineMock[{url}]""" ),) UpperCamelCase :List[str] = (max_retry_error,) raise def raise_connection_error(__magic_name__ : Tuple , __magic_name__ : int , **__magic_name__ : str ): raise requests.ConnectionError("""Offline mode is enabled.""" , request=__magic_name__ ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("""requests.Session.send""" , __magic_name__ ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("""requests.Session.request""" , __magic_name__ ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("""datasets.config.HF_DATASETS_OFFLINE""" , __magic_name__ ): yield else: raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" ) @contextmanager def SCREAMING_SNAKE_CASE_ ( *__magic_name__ : str , **__magic_name__ : Dict ) -> List[Any]: """simple docstring""" UpperCamelCase :Optional[int] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__magic_name__ , **__magic_name__ ) as tmp_dir: try: os.chdir(__magic_name__ ) yield finally: os.chdir(__magic_name__ ) @contextmanager def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]: """simple docstring""" import gc gc.collect() UpperCamelCase :Tuple = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - 
previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: """simple docstring""" import gc gc.collect() UpperCamelCase :Dict = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : Dict ) -> Tuple: """simple docstring""" return deepcopy(__magic_name__ ).integers(0 , 100 , 10 ).tolist() == deepcopy(__magic_name__ ).integers(0 , 100 , 10 ).tolist() def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(__magic_name__ : Tuple , *__magic_name__ : List[str] , **__magic_name__ : List[Any] ): try: return func(*__magic_name__ , **__magic_name__ ) except HTTPError as err: if str(__magic_name__ ).startswith("""500""" ) or str(__magic_name__ ).startswith("""502""" ): pytest.xfail(str(__magic_name__ ) ) raise err return decorator.decorator(_wrapper , __magic_name__ ) class _SCREAMING_SNAKE_CASE : def __init__( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ): UpperCamelCase :int = returncode UpperCamelCase :Tuple = stdout UpperCamelCase :List[str] = stderr async def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" while True: UpperCamelCase :List[str] = await stream.readline() if line: callback(__magic_name__ ) else: break async def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Tuple , __magic_name__ : List[str]=None , __magic_name__ : Optional[int]=None , __magic_name__ : int=None , __magic_name__ : int=False , __magic_name__ : str=False ) -> _RunOutput: """simple docstring""" if echo: print("""\nRunning: """ , """ """.join(__magic_name__ ) ) UpperCamelCase :List[str] = await asyncio.create_subprocess_exec( cmd[0] , 
*cmd[1:] , stdin=__magic_name__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__magic_name__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCamelCase :List[Any] = [] UpperCamelCase :List[Any] = [] def tee(__magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Optional[Any]="" ): UpperCamelCase :List[Any] = line.decode("""utf-8""" ).rstrip() sink.append(__magic_name__ ) if not quiet: print(__magic_name__ , __magic_name__ , file=__magic_name__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stdout , label="""stdout:""" ) ), _read_stream(p.stderr , lambda __magic_name__ : tee(__magic_name__ , __magic_name__ , sys.stderr , label="""stderr:""" ) ), ] , timeout=__magic_name__ , ) return _RunOutput(await p.wait() , __magic_name__ , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : List[Any]=None , __magic_name__ : Optional[Any]=None , __magic_name__ : List[str]=180 , __magic_name__ : Optional[Any]=False , __magic_name__ : Any=True ) -> _RunOutput: """simple docstring""" UpperCamelCase :str = asyncio.get_event_loop() UpperCamelCase :Optional[int] = loop.run_until_complete( _stream_subprocess(__magic_name__ , env=__magic_name__ , stdin=__magic_name__ , timeout=__magic_name__ , quiet=__magic_name__ , echo=__magic_name__ ) ) UpperCamelCase :Tuple = """ """.join(__magic_name__ ) if result.returncode > 0: UpperCamelCase :int = 
"""\n""".join(result.stderr ) raise RuntimeError( f"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"""'{cmd_str}' produced no output.""" ) return result def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]: """simple docstring""" UpperCamelCase :int = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" ) UpperCamelCase :str = re.sub(R"""^gw""" , """""" , __magic_name__ , 0 , re.M ) return int(__magic_name__ ) def SCREAMING_SNAKE_CASE_ ( ) -> Any: """simple docstring""" UpperCamelCase :Union[str, Any] = 2_9500 UpperCamelCase :Union[str, Any] = pytest_xdist_worker_id() return port + uniq_delta
38
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _SCREAMING_SNAKE_CASE ( _a ): snake_case__ : Any = """openai/whisper-base""" snake_case__ : Optional[int] = ( """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """ """transcribed text.""" ) snake_case__ : Any = """transcriber""" snake_case__ : Optional[int] = WhisperProcessor snake_case__ : str = WhisperForConditionalGeneration snake_case__ : Optional[Any] = ["""audio"""] snake_case__ : Any = ["""text"""] def _A ( self : str , __lowerCamelCase : Dict ): return self.pre_processor(__lowerCamelCase , return_tensors="""pt""" ).input_features def _A ( self : Dict , __lowerCamelCase : List[Any] ): return self.model.generate(inputs=__lowerCamelCase ) def _A ( self : Any , __lowerCamelCase : Optional[Any] ): return self.pre_processor.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )[0]
38
1
import argparse
import datetime


def SCREAMING_SNAKE_CASE_(date_input: str) -> str:
    """Return the day of the week for a date, computed via Zeller's congruence.

    Args:
        date_input: date string in ``mm-dd-yyyy`` or ``mm/dd/yyyy`` format
            (exactly 10 characters).

    Returns:
        A sentence such as ``"Your date 01-01-2000, is a Saturday!"``.

    Raises:
        ValueError: for malformed input (wrong length, bad separators, or
            month/day/year out of range) or an impossible calendar date.
        AssertionError: if the hand computation disagrees with ``datetime``
            (cross-check against the standard library; should never trigger).
    """
    # Zeller's congruence yields 0..6 with 0 == Sunday.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # datetime.weekday() uses 0 == Monday; map it onto Zeller's numbering
    # so the two schemes can be compared below.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Fixed-position parsing below needs exactly 10 characters. (The
    # original check accepted 1-9 characters and then crashed with an
    # IndexError while slicing.)
    if len(date_input) != 10:
        raise ValueError("Must be 10 characters long")

    # Month: characters 0-1.
    m = int(date_input[0] + date_input[1])
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_a = date_input[2]
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Day: characters 3-4.
    d = int(date_input[3] + date_input[4])
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    sep_a = date_input[5]
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Year: characters 6-9.
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary sanity range kept from the original implementation.
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # datetime validates the calendar (e.g. rejects Feb 30) before we do math.
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Zeller's congruence treats January/February as months 13/14 of the
    # previous year.
    if m <= 2:
        y = y - 1
        m = m + 12

    c = int(str(y)[:2])  # century part of the (possibly adjusted) year
    k = int(str(y)[2:])  # two-digit year within the century
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)

    # Cross-check the congruence result against the standard library.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    # Bug fix: the script previously called the undefined name ``zeller``,
    # which raised NameError before any date could be evaluated.
    SCREAMING_SNAKE_CASE_(args.date_input)
38
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class _SCREAMING_SNAKE_CASE(TaskTemplate):
    """Task template for automatic-speech-recognition datasets.

    Maps a dataset's audio column onto the canonical ``audio`` input and its
    transcription column onto the ``transcription`` label.

    Obfuscation damage fixed here: the base class and ``frozen`` argument were
    the undefined name ``_a`` (restored to ``TaskTemplate`` / ``True``); every
    field was bound to the same shadowed name ``snake_case__`` (restored to
    the names the methods below actually reference); ``align_with_features``
    assigned its intermediates to throwaway names and returned an undefined
    variable; and both members were named ``_A``, so the property shadowed
    the method.
    """

    # Task identifier; kept in asdict output even when left at its default.
    task: str = field(
        default="automatic-speech-recognition",
        metadata={"include_in_asdict_even_if_is_default": True},
    )
    # Canonical input/label schemas for this task.
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    # Names of the dataset columns holding the audio and its transcription.
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the
        dataset's actual ``Audio`` feature for ``self.audio_column``.

        Raises:
            ValueError: if the column is missing from ``features`` or is not
                an ``Audio`` feature.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write the updated schema via __dict__
        # instead of attribute assignment.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Mapping from the dataset's column names to the template's
        # canonical column names.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
38
1
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, 
is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
38
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
38
1
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Walk unvisited edges depth-first from ``u`` and return the vertex path.

    ``visited_edge`` is a (max_node+1) x (max_node+1) boolean matrix; each
    undirected edge is marked in both directions as it is consumed, so no
    edge is traversed twice.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            # Mark the undirected edge as used in both directions. (The
            # obfuscated original assigned these flags to throwaway locals,
            # so edges were never marked and the recursion never terminated.)
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify ``graph`` by its number of odd-degree vertices.

    Returns ``(status, odd_node)`` where status is:
      1 -- an Euler circuit exists (no odd-degree vertices),
      2 -- an Euler path exists (exactly two odd-degree vertices),
      3 -- neither exists.
    ``odd_node`` is the last odd-degree vertex seen, or -1 if none.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Report whether ``graph`` has an Euler circuit/path and print one.

    An Euler path must start at an odd-degree vertex; a circuit may start
    anywhere (vertex 1 is used by convention).
    """
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Run the Euler check on a handful of sample graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],
    }  # all degree is zero
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


# Backward-compatible alias: in the obfuscated original every function was
# bound to this single name in turn, so the surviving binding was the entry
# point. Proper names were restored above because the bodies call
# ``dfs``/``check_circuit_or_path``/``check_euler``/``main`` directly, which
# previously raised NameError.
SCREAMING_SNAKE_CASE_ = main


if __name__ == "__main__":
    main()
38
import re
import string

import numpy as np

import datasets


# Obfuscation damage fixed in this module: the three module-level constants
# were all bound to the same name (so the decorator's references to
# _DESCRIPTION/_KWARGS_DESCRIPTION raised NameError), both Metric methods
# were named ``_A`` (shadowing each other and never implementing the
# ``_info``/``_compute`` hooks datasets.Metric requires), and every
# normalization step in the compute body discarded its result.
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE(datasets.Metric):
    """Exact-match metric: percentage of predictions equal to their reference."""

    def _info(self):
        # Metric metadata consumed by the datasets library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Compute the exact-match rate after the requested normalizations.

        Each normalization rebinds ``predictions``/``references`` so the
        options compose (the obfuscated original assigned every intermediate
        array to a throwaway name, so no option had any effect).
        """
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            # Translation table that deletes all ASCII punctuation.
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            # Translation table that deletes all ASCII digits.
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
38
1
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


# NOTE(review): obfuscation artifacts throughout this module, transcribed
# unchanged — every method is named ``_A`` (later definitions shadow earlier
# ones), intermediate results are assigned to the throwaway name
# ``UpperCamelCase`` while later statements reference the original variable
# names (``batch_size``, ``model``, ``loading_info``, ...), several calls
# reference the undefined name ``__lowerCamelCase`` (presumably
# ``torch_device`` or ``True`` depending on the call site), and the second
# class reuses the first class's name. Each artifact is flagged where it
# occurs.
class _SCREAMING_SNAKE_CASE( _a , unittest.TestCase ):
    # Model-level unit tests for ``PriorTransformer``.
    # NOTE(review): base ``_a`` is undefined; presumably ``ModelTesterMixin``
    # (imported above) — confirm.
    snake_case__ : Any = PriorTransformer          # model_class, presumably
    snake_case__ : List[Any] = """hidden_states"""  # main input name, presumably

    @property
    def _A( self : List[Any] ):
        # Dummy (unseeded) model inputs: random embeddings of fixed shape.
        UpperCamelCase :List[str] = 4    # batch size, presumably
        UpperCamelCase :Optional[int] = 8    # embedding dim, presumably
        UpperCamelCase :Dict = 7    # number of embeddings, presumably
        # NOTE(review): ``batch_size``/``embedding_dim``/``num_embeddings``
        # and ``__lowerCamelCase`` are undefined here (obfuscation damage).
        UpperCamelCase :Tuple = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase )
        UpperCamelCase :Optional[int] = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase )
        UpperCamelCase :int = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _A( self : Optional[int] , __lowerCamelCase : int=0 ):
        # Seeded dummy inputs for reproducible output-slice checks.
        torch.manual_seed(__lowerCamelCase )
        UpperCamelCase :Dict = 4
        UpperCamelCase :Optional[Any] = 8
        UpperCamelCase :List[Any] = 7
        # NOTE(review): undefined names as in the property above.
        UpperCamelCase :str = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
        UpperCamelCase :int = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
        UpperCamelCase :Tuple = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def _A( self : Optional[int] ):
        # Input shape, presumably.
        return (4, 8)

    @property
    def _A( self : Optional[int] ):
        # Output shape, presumably.
        return (4, 8)

    def _A( self : Optional[int] ):
        # Common init kwargs + dummy inputs for the tester mixin.
        UpperCamelCase :Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 4,
            """num_layers""": 2,
            """embedding_dim""": 8,
            """num_embeddings""": 7,
            """additional_embeddings""": 4,
        }
        UpperCamelCase :int = self.dummy_input
        # NOTE(review): ``init_dict``/``inputs_dict`` are undefined (damage).
        return init_dict, inputs_dict

    def _A( self : Dict ):
        # from_pretrained smoke test against the hub dummy checkpoint.
        # NOTE(review): the tuple unpacking collapsed to throwaway names and
        # ``__lowerCamelCase`` is undefined; presumably
        # ``model, loading_info = PriorTransformer.from_pretrained(...,
        # output_loading_info=True)`` — confirm against upstream.
        UpperCamelCase , UpperCamelCase :Optional[int] = PriorTransformer.from_pretrained(
            """hf-internal-testing/prior-dummy""" , output_loading_info=__lowerCamelCase )
        self.assertIsNotNone(__lowerCamelCase )
        self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
        model.to(__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"

    def _A( self : int ):
        # forward() signature check: first args must be hidden_states, timestep.
        UpperCamelCase , UpperCamelCase :str = self.prepare_init_args_and_inputs_for_common()
        UpperCamelCase :Tuple = self.model_class(**__lowerCamelCase )
        UpperCamelCase :Any = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        UpperCamelCase :Any = [*signature.parameters.keys()]
        UpperCamelCase :int = ["""hidden_states""", """timestep"""]
        self.assertListEqual(arg_names[:2] , __lowerCamelCase )

    def _A( self : Optional[int] ):
        # Deterministic output-slice regression test on the dummy checkpoint.
        UpperCamelCase :Optional[int] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
        UpperCamelCase :Optional[Any] = model.to(__lowerCamelCase )
        if hasattr(__lowerCamelCase , """set_default_attn_processor""" ):
            model.set_default_attn_processor()
        UpperCamelCase :Dict = self.get_dummy_seed_input()
        with torch.no_grad():
            UpperCamelCase :Dict = model(**__lowerCamelCase )[0]
        UpperCamelCase :int = output[0, :5].flatten().cpu()
        print(__lowerCamelCase )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        UpperCamelCase :Optional[int] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
        self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1E-2 ) )


@slow
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
    # Slow integration tests against the Kandinsky 2.1 prior checkpoint.
    # NOTE(review): this class reuses the name of the class above and
    # shadows it at module level (obfuscation damage).
    def _A( self : int , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Union[str, Any]=77 , __lowerCamelCase : Union[str, Any]=0 ):
        # Seeded dummy inputs sized for the real checkpoint.
        # NOTE(review): all four parameters share the name
        # ``__lowerCamelCase`` — duplicate parameter names are a SyntaxError
        # in Python; presumably batch_size=1, embedding_dim=768,
        # num_embeddings=77, seed=0 — confirm against upstream.
        torch.manual_seed(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = batch_size
        UpperCamelCase :Tuple = embedding_dim
        UpperCamelCase :Optional[int] = num_embeddings
        UpperCamelCase :List[Any] = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
        UpperCamelCase :List[Any] = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def _A( self : int ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ] )
    def _A( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Dict ):
        # Output-slice regression over seeds: (seed, expected_slice) pairs.
        # NOTE(review): duplicate parameter names again (SyntaxError);
        # presumably (seed, expected_slice) — confirm.
        UpperCamelCase :Any = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
        model.to(__lowerCamelCase )
        UpperCamelCase :Any = self.get_dummy_seed_input(seed=__lowerCamelCase )
        with torch.no_grad():
            UpperCamelCase :Optional[int] = model(**__lowerCamelCase )[0]
        assert list(sample.shape ) == [1, 768]
        UpperCamelCase :Dict = sample[0, :8].flatten().cpu()
        print(__lowerCamelCase )
        UpperCamelCase :List[str] = torch.tensor(__lowerCamelCase )
        assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
38
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


# NOTE(review): obfuscation artifacts in this module, transcribed unchanged —
#   * both module-level constants are bound to the same name
#     ``UpperCAmelCase_`` (the logger binding is immediately shadowed by the
#     archive map);
#   * both class bases are the undefined name ``_a`` (presumably
#     ``PretrainedConfig`` / ``OnnxConfig`` per the imports);
#   * ``__init__`` and ``generate_dummy_inputs`` declare every parameter with
#     the same name ``__lowerCamelCase`` — duplicate parameter names are a
#     SyntaxError in Python;
#   * attribute writes (``self.x = ...``) were collapsed into assignments to
#     the throwaway name ``UpperCamelCase``, so the config would store
#     nothing even if it parsed.
UpperCAmelCase_ : Dict = logging.get_logger(__name__)

UpperCAmelCase_ : str = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}


class _SCREAMING_SNAKE_CASE( _a ):
    # Configuration for LayoutLMv3 (joint text + layout + image model).
    snake_case__ : Optional[int] = """layoutlmv3"""  # model_type, presumably

    def __init__( self : List[Any] , __lowerCamelCase : Optional[Any]=50_265 , __lowerCamelCase : Dict=768 ,
                  __lowerCamelCase : Any=12 , __lowerCamelCase : int=12 , __lowerCamelCase : str=3_072 ,
                  __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : List[str]=0.1 ,
                  __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=512 ,
                  __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=0.02 ,
                  __lowerCamelCase : Union[str, Any]=1E-5 , __lowerCamelCase : Any=1 ,
                  __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : List[Any]=2 ,
                  __lowerCamelCase : Dict=1_024 , __lowerCamelCase : List[Any]=128 ,
                  __lowerCamelCase : str=128 , __lowerCamelCase : Optional[Any]=True ,
                  __lowerCamelCase : str=32 , __lowerCamelCase : List[Any]=128 ,
                  __lowerCamelCase : str=64 , __lowerCamelCase : List[str]=256 ,
                  __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=True ,
                  __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=224 ,
                  __lowerCamelCase : Tuple=3 , __lowerCamelCase : Dict=16 ,
                  __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : Optional[Any] , ):
        # NOTE(review): duplicate parameter names (SyntaxError); per the
        # defaults' order these are presumably vocab_size, hidden_size,
        # num_hidden_layers, num_attention_heads, intermediate_size,
        # hidden_act, hidden_dropout_prob, attention_probs_dropout_prob,
        # max_position_embeddings, type_vocab_size, initializer_range,
        # layer_norm_eps, pad/bos/eos token ids, then the layout/vision
        # options assigned below — confirm against upstream.
        super().__init__(
            vocab_size=__lowerCamelCase , hidden_size=__lowerCamelCase ,
            num_hidden_layers=__lowerCamelCase , num_attention_heads=__lowerCamelCase ,
            intermediate_size=__lowerCamelCase , hidden_act=__lowerCamelCase ,
            hidden_dropout_prob=__lowerCamelCase , attention_probs_dropout_prob=__lowerCamelCase ,
            max_position_embeddings=__lowerCamelCase , type_vocab_size=__lowerCamelCase ,
            initializer_range=__lowerCamelCase , layer_norm_eps=__lowerCamelCase ,
            pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase ,
            eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
        # NOTE(review): the right-hand names below are the intended parameter
        # names; they are undefined under the duplicated signature above, and
        # the left-hand sides discard the values instead of writing ``self``.
        UpperCamelCase :int = max_ad_position_embeddings
        UpperCamelCase :Tuple = coordinate_size
        UpperCamelCase :List[Any] = shape_size
        UpperCamelCase :Union[str, Any] = has_relative_attention_bias
        UpperCamelCase :Any = rel_pos_bins
        UpperCamelCase :Optional[Any] = max_rel_pos
        UpperCamelCase :str = has_spatial_attention_bias
        UpperCamelCase :Tuple = rel_ad_pos_bins
        UpperCamelCase :Optional[int] = max_rel_ad_pos
        UpperCamelCase :Tuple = text_embed
        UpperCamelCase :str = visual_embed
        UpperCamelCase :Optional[Any] = input_size
        UpperCamelCase :str = num_channels
        UpperCamelCase :List[Any] = patch_size
        UpperCamelCase :Optional[Any] = classifier_dropout


class _SCREAMING_SNAKE_CASE( _a ):
    # ONNX export configuration for LayoutLMv3.
    # NOTE(review): reuses the config class's name and shadows it at module
    # level; base ``_a`` presumably ``OnnxConfig``.
    snake_case__ : int = version.parse("""1.12""" )  # torch_onnx_minimum_version, presumably

    @property
    def _A( self : Optional[int] ):
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ] )
        else:
            return OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                    ("""bbox""", {0: """batch""", 1: """sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
                    ("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
                ] )

    @property
    def _A( self : str ):
        # Validation tolerance (atol), presumably.
        return 1E-5

    @property
    def _A( self : Dict ):
        # Default ONNX opset, presumably.
        return 12

    def _A( self : Dict , __lowerCamelCase : "ProcessorMixin" , __lowerCamelCase : int = -1 ,
            __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False ,
            __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 ,
            __lowerCamelCase : int = 40 , __lowerCamelCase : int = 40 , ):
        # Builds dummy (text, boxes, image) inputs for ONNX export.
        # NOTE(review): duplicate parameter names again (SyntaxError);
        # presumably (processor, batch_size=-1, seq_length=-1, is_pair=False,
        # framework=None, num_channels=3, image_width=40, image_height=40).
        # ``processor``/``seq_length``/``batch_size``/``inputs`` below are
        # undefined under this signature.
        setattr(processor.image_processor , """apply_ocr""" , __lowerCamelCase )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        UpperCamelCase :Optional[Any] = compute_effective_axis_dimension(
            __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCamelCase :Optional[int] = processor.tokenizer.num_special_tokens_to_add(__lowerCamelCase )
        UpperCamelCase :int = compute_effective_axis_dimension(
            __lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
        # Generate dummy inputs according to compute batch and sequence
        UpperCamelCase :Any = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        UpperCamelCase :Optional[Any] = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        UpperCamelCase :List[str] = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        UpperCamelCase :Any = dict(
            processor(
                __lowerCamelCase , text=__lowerCamelCase , boxes=__lowerCamelCase , return_tensors=__lowerCamelCase , ) )
        return inputs
38
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# Obfuscation damage fixed in this module: the logger and this map were both
# bound to one shadowed name, both classes below shared one name, the
# ``__init__`` parameters all shared one name (a SyntaxError), and every
# ``self.x = x`` write was collapsed into a discarded assignment.
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ResNet model / backbone.

    The base classes were the undefined names ``(_a, _a)`` in the obfuscated
    original; restored to the two classes imported above. Parameter names and
    attribute writes were restored from the defaults' order and the in-body
    references (``self.layer_types``, ``len(depths)``).
    """

    model_type = "resnet"
    # Valid values for ``layer_type`` (referenced by the check in __init__).
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1_024, 2_048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Create a ResNet configuration.

        Args:
            num_channels: number of input image channels.
            embedding_size: channel width of the stem embedding.
            hidden_sizes: per-stage channel widths.
            depths: number of layers per stage (also fixes the stage count).
            layer_type: residual block variant; one of ``self.layer_types``.
            hidden_act: activation function name.
            downsample_in_first_stage: whether stage 1 downsamples its input.
            out_features / out_indices: which stages a backbone exposes.

        Raises:
            ValueError: if ``layer_type`` is not a known variant.
        """
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # Stage names drive the backbone feature-selection helper below.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for ResNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating the exported model's outputs.
        return 1e-3


# Backward-compatible alias: in the obfuscated original both classes shared
# this name, so its surviving binding was the ONNX config.
_SCREAMING_SNAKE_CASE = ResNetOnnxConfig
38
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


# NOTE(review): class, base, attribute and method names below were obfuscated
# (duplicate parameter names made the file a SyntaxError); they are reconstructed
# from the in-body references and the imported mixins — confirm against upstream.
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny deterministic pipeline components for fast CPU tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a small, seeded img2img call payload on `device`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map [-1, 1]-ish noise into [0, 1]
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        # Optional components are intentionally not exercised for this pipeline.
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Seeded full-size inputs; latents are fixed via numpy for reproducibility."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
38
1
import glob
import os
import random
from string import ascii_lowercase, digits

# NOTE(review): the incoming code imported the non-existent module `cva`; every
# call (`imread`, `flip`, `imwrite`, `IMWRITE_JPEG_QUALITY`) matches OpenCV's API,
# so the import is restored to cv2 — confirm no local shim named `cva` exists.
import cv2

# Directory containing YOLO-format .txt label files.
LABEL_DIR = ""
# Directory containing the matching .jpg images.
IMAGE_DIR = ""
# Directory the flipped images/labels are written into.
OUTPUT_DIR = ""
# Flip axis for cv2.flip (0 is vertical, 1 is horizontal).
FLIP_TYPE = 1


def main() -> None:
    """Flip every dataset image and its annotations, then write the results."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # NOTE(fix): the previous code prefixed the output paths with '/', which
        # wrote to the filesystem root instead of OUTPUT_DIR.
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO boxes.

    Args:
        label_dir: directory with .txt annotation files.
        img_dir: directory with the matching .jpg images.

    Returns:
        (img_paths, labels) where labels[i] is a list of
        [class_id, x_center, y_center, width, height] rows for img_paths[i].
        Images whose label file contains no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its normalized box centers.

    flip_type 1 flips horizontally (x centers mirrored as 1 - x); flip_type 0
    flips vertically (y centers mirrored as 1 - y).

    Returns:
        (new_images, new_annos, path_list) aligned by index.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of length `number_char` (> 1)."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
38
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


# NOTE(review): class/base/parameter names were obfuscated in the incoming code
# (duplicate parameter names made __init__ a SyntaxError, attribute assignments
# went to a throwaway local); they are reconstructed from the assignment RHS
# names and the imported PretrainedConfig base — confirm against callers.
class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration for a TrajectoryTransformer model.

    Stores model hyperparameters (GPT-style trainer over (observation, action,
    reward, value) transition tokens) and forwards the special token ids to
    ``PretrainedConfig``.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
38
1
# NOTE(review): `sklearn.metrics.fa_score` does not exist; the calls below all
# match `f1_score`, so the import and every use are restored accordingly.
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record


_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Mean of elementwise equality between predictions and labels."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """Return accuracy and F1 (averaging strategy selectable via `f1_avg`)."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """MultiRC scoring: per-question exact match / macro-F1, plus overall answer F1."""
    # Group (prediction, label) pairs by their paragraph-question id.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        # Exact match: every answer of the question predicted correctly.
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]"
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            # record/multirc take nested dicts, which the numpy format cannot hold.
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]"
            )
38
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int = 3 ) -> qiskit.result.counts.Counts: """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): raise TypeError("""number of qubits must be a integer.""" ) if number_of_qubits <= 0: raise ValueError("""number of qubits must be > 0.""" ) if math.floor(__magic_name__ ) != number_of_qubits: raise ValueError("""number of qubits must be exact integer.""" ) if number_of_qubits > 10: raise ValueError("""number of qubits too large to simulate(>10).""" ) UpperCamelCase :int = QuantumRegister(__magic_name__ , """qr""" ) UpperCamelCase :str = ClassicalRegister(__magic_name__ , """cr""" ) UpperCamelCase :str = QuantumCircuit(__magic_name__ , __magic_name__ ) UpperCamelCase :List[Any] = number_of_qubits for i in range(__magic_name__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(__magic_name__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , __magic_name__ , __magic_name__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(__magic_name__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(__magic_name__ , __magic_name__ ) # simulate with 10000 shots UpperCamelCase :str = Aer.get_backend("""qasm_simulator""" ) UpperCamelCase :Dict = execute(__magic_name__ , __magic_name__ , shots=1_0000 ) return job.result().get_counts(__magic_name__ ) if __name__ == "__main__": print( F'''Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}''' )
38
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ : Union[str, Any] = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[Any] = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
38
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


# NOTE(review): these constants and the ModelToSave class were referenced by
# name in the incoming code but never defined under those names; reconstructed
# from the in-body references — confirm against upstream.
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Minimal tokenizer+BERT Keras model used to exercise SavedModel export."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        # The TF tokenizer must match the slow Python tokenizer token-for-token.
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        # Pre-paired input must equal text/text_pair input.
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        # tf.function-compiled tokenizer must match eager execution.
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
38
1