| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
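# Usage sketch (editor's addition, not part of the original script): the script needs
# a GITHUB_TOKEN environment variable with write access to the target repository; the
# file name stale.py is an assumption. A typical scheduled-job invocation would be:
#
#     GITHUB_TOKEN=<personal-access-token> python stale.py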
[code_codestyle: 193]
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    r"""Constructs a GLPN image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self, image, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # Bare `resize` here resolves to the module-level image_transforms function.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
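# Usage sketch (editor's addition): a minimal call of the processor above, assuming it
# is the GLPN image processor from transformers. Heights and widths are rounded down to
# a multiple of size_divisor and pixel values are rescaled to [0, 1].
#
#     import numpy as np
#     processor = GLPNImageProcessor()
#     image = np.random.randint(0, 256, (478, 642, 3), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"][0].shape)  # (3, 448, 640): channels-first, dims divisible by 32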
[style_context_codestyle: 43 | label: 0]
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
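# Usage sketch (editor's addition): typical translation pre-processing with the tokenizer
# above, assuming the published facebook/mbart-large-en-ro checkpoint.
#
#     tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # Per set_src_lang_special_tokens above, input_ids carry no prefix and end with
#     # [eos_token_id, lang_code_id("en_XX")].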
[code_codestyle: 326]
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Generates the next generation for a given state of the Game of Life."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generates a list of images of subsequent Game of Life states."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
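# Usage sketch (editor's addition): the same driver works for the BLINKER pattern, which
# oscillates with period 2, so frames 0 and 2 are identical.
#
#     frames = generate_images(BLINKER, 4)
#     frames[0].save("blinker.gif", save_all=True, append_images=frames[1:])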
[style_context_codestyle: 43 | label: 0]
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
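# Usage sketch (editor's addition): assuming the usual transformers test layout
# (tests/models/llama/test_modeling_llama.py), the fast tests run with:
#
#     python -m pytest tests/models/llama/test_modeling_llama.py
#
# Tests decorated with @slow are skipped unless RUN_SLOW=1 is set, and the integration
# tests additionally require access to the gated meta-llama checkpoints.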
[code_codestyle: 251]
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
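# Usage sketch (editor's addition): converting a PyTorch diffusers checkpoint to Flax
# parameters; the model pair below is hypothetical and stands for any matching
# PyTorch/Flax architecture.
#
#     pt_state_dict = pt_model.state_dict()  # e.g. a UNet2DConditionModel
#     params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#     # `params` is the nested dict of Flax weights keyed the way flax_model expects.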
[style_context_codestyle: 43 | label: 0]
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
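# Usage sketch (editor's addition): the integration test above corresponds to this
# standalone inference flow (checkpoint name taken from the test):
#
#     from transformers import AutoImageProcessor, YolosForObjectDetection
#     image = Image.open("cats.png")  # any RGB image
#     processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#     model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     results = processor.post_process_object_detection(
#         outputs, threshold=0.3, target_sizes=[image.size[::-1]]
#     )[0]  # dict with "scores", "labels" and "boxes"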
[code_codestyle: 145]
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
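# Usage sketch (editor's addition; the script and file names are placeholders):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base/pytorch_model.bin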
[style_context_codestyle: 43 | label: 0]
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_snake_case = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_snake_case = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_snake_case = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by counting each char that matches `main_target` at the same position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of `child`."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and breed; fitter parents get more children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one matches `target`."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #        max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
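
# A small seeded demo of the evolution loop above (added for illustration;
# `demo` is not part of the original algorithm file).
def demo() -> None:
    random.seed(0)
    target = "HELLO WORLD"
    genes = list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    generation, total_population, best = basic(target, genes, debug=False)
    print(f"matched {best!r} in {generation} generations ({total_population} candidates evaluated)")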
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
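
# Added cross-check (not in the original file): for definite (0/1) inputs the
# circuit above should concentrate its counts on the classical full-adder result.
# The bit order is assumed to follow qiskit's default "carry,sum" register printing.
def classical_full_adder(input_1: int, input_2: int, carry_in: int) -> str:
    carry_out, total = divmod(input_1 + input_2 + carry_in, 2)
    return f"{carry_out}{total}"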
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
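
# A minimal encryption round-trip sketch (added; this file only generates keys).
# It assumes this module's convention that e_2 is the modular inverse of e_1**d mod p.
def demo_roundtrip(key_size: int = 64) -> bool:
    public_key, private_key = generate_key(key_size)
    _, e_1, e_2, p = public_key
    _, d = private_key
    message = 42
    ephemeral = random.randrange(3, p)
    c_1 = pow(e_1, ephemeral, p)
    c_2 = (message * pow(e_2, ephemeral, p)) % p
    recovered = (c_2 * pow(c_1, d, p)) % p  # c_1**d cancels e_2**ephemeral
    return recovered == message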
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
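
# A deterministic demo (added for illustration): seeding makes the random pivot
# choices, and therefore the whole recursion, reproducible.
def demo() -> None:
    random.seed(0)
    data = [9, 1, 8, 2, 7, 3]
    quick_sort_random(data, 0, len(data))
    assert data == [1, 2, 3, 7, 8, 9]
    print(data)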
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_warning_message_fast_tokenizer(self):
        pass
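
# Added sketch (not a test): what `truncate_before_pattern` in `test_truncation`
# above is for -- clipping a CodeGen completion at the first regex match, e.g. at
# a comment marker or a run of blank lines.
def clip_completion(tokenizer, input_ids):
    return tokenizer.decode(input_ids, truncate_before_pattern=["^#", "\n\n\n"])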
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d < `digit` for which 1/d contains the longest recurring
    cycle in its decimal fraction part.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
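
# Worked example (added): among d < 10, 1/7 = 0.(142857) has the longest
# recurring cycle (6 digits), so solution(1, 10) is expected to return 7.
def demo() -> None:
    print(solution(1, 10))  # 7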
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it at -1 picks a random count
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leaving it at -1 picks a random count
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
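
# A small usage sketch of the two classes above (added for illustration).
def demo() -> None:
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)
    print(dg.all_nodes())  # [0, 1, 2]
    print(dg.dfs(0, 2))    # DFS path from 0 towards 2
    print(dg.has_cycle())  # expected True for the 3-cycle 0 -> 1 -> 2 -> 0

    g = Graph()
    g.add_pair(0, 1)
    print(g.degree(0))     # 1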
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
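
# For reference (added): a hypothetical record in the shape this script expects.
# Real biencoder records carry more fields; only these two are used above.
EXAMPLE_DPR_RECORD = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson", "text": "..."}],
}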
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
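
# Added sketch of what the `_LazyModule` indirection above buys: importing the
# package is cheap, and only attribute access triggers the heavy submodule import.
def demo_lazy_access():
    from transformers.models import electra  # no torch/tf/flax import happens yet

    return electra.ElectraConfig  # touching the attribute loads `configuration_electra`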
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(monkeypatch, urls_type, tmp_path):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def UpperCAmelCase__ (snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
_snake_case : Dict = str(snake_case__ )
if issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = filename
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : Optional[Any] = [filename]
elif issubclass(snake_case__ , snake_case__ ):
_snake_case : int = {'''train''': filename}
_snake_case : Any = '''dummy'''
_snake_case : Dict = xz_file.parent
_snake_case : str = '''extracted'''
_snake_case : str = DownloadConfig(
cache_dir=snake_case__ , use_etag=snake_case__ , )
_snake_case : int = DownloadManager(dataset_name=snake_case__ , download_config=snake_case__ )
_snake_case : int = dl_manager.extract(snake_case__ )
_snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(snake_case__ , snake_case__ ):
_snake_case : Optional[int] = [extracted_paths]
_snake_case : Tuple = [paths]
elif isinstance(snake_case__ , snake_case__ ):
assert "train" in extracted_paths.keys()
_snake_case : Optional[int] = extracted_paths.values()
_snake_case : Dict = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(snake_case__ , snake_case__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case : Union[str, Any] = Path(snake_case__ )
_snake_case : Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(snake_case__ , etag=snake_case__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case : int = extracted_path.read_text()
_snake_case : List[Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCAmelCase__ (snake_case__ : Dict , snake_case__ : Union[str, Any] ):
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(snake_case__ , start=1 ):
_snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def UpperCAmelCase__ (snake_case__ : str , snake_case__ : int ):
"""simple docstring"""
_snake_case : Dict = request.getfixturevalue(snake_case__ )
_snake_case : Optional[int] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ):
_test_jsonl(snake_case__ , snake_case__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def UpperCAmelCase__ (snake_case__ : Tuple , snake_case__ : Optional[Any] ):
"""simple docstring"""
_snake_case : Dict = request.getfixturevalue(snake_case__ )
_snake_case : int = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ):
_test_jsonl(snake_case__ , snake_case__ )
assert num_tar == 1
assert num_jsonl == 2
def UpperCAmelCase__ (snake_case__ : str ):
"""simple docstring"""
_snake_case : Union[str, Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(snake_case__ ) , start=1 ):
assert os.path.basename(snake_case__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
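
# Added sketch of the cache layout asserted above:
# <cache_dir>/downloads/<hash_url_to_filename(url)>, plus a sibling ".json" metadata file.
def expected_cache_path(cache_dir: str, url: str) -> str:
    return os.path.join(cache_dir, "downloads", hash_url_to_filename(url))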
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('''glue''', '''mrpc''')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels'''])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
        # Metric-dict key naming follows the upstream accelerate test script.
        performance_metric[f"""epoch-{epoch}"""] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, '''all_results.json'''), '''w''') as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''', type=str, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=False, )
    parser.add_argument(
        '''--output_dir''', type=str, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
    parser.add_argument(
        '''--performance_lower_bound''', type=float, default=None, help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''', )
    parser.add_argument(
        '''--num_epochs''', type=int, default=3, help='''Number of train epochs.''', )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
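# Launch sketch (assumed invocation, not part of the original script):
#   accelerate launch this_script.py --model_name_or_path bert-base-cased --num_epochs 3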
| 43
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
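# Worked example (derived from the branches above, no extra functionality): for the default
# checkpoint name "swin_tiny_patch4_window7_224" this yields window_size=7, image_size=224,
# embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24) and 1000 ImageNet-1k labels.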
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
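# Shape sketch of the fused-qkv split above (illustrative, assuming attention head size `dim`):
# a timm qkv weight of shape (3 * dim, dim) becomes three (dim, dim) matrices,
#   query = val[:dim, :], key = val[dim : dim * 2, :], value = val[-dim:, :]
# and the (3 * dim,) qkv bias splits the same way along its single axis.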
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 81
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
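# Minimal usage sketch (assumed, not part of the original module):
# config = DeformableDetrConfig(num_queries=300, with_box_refine=True, two_stage=True)
# config.hidden_size  # -> 256, resolved to `d_model` through `attribute_map`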
| 43
| 0
|
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
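# Illustrative mapping (follows from the fixture above, not an extra test): `token2json` turns
# "<s_name>John Doe</s_name>" into {"name": "John Doe"}, and repeated "<s_nickname>...</s_nickname>"
# groups separated by "<sep/>" into a list of dicts, as in the `nicknames` field.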
| 193
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
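# Usage sketch (assumed calling convention for agent tools, not part of the original module):
# classifier = TextClassificationTool()
# classifier("This new restaurant is awesome", labels=["positive", "negative"])  # -> "positive"
# Under the hood, each label is scored as the NLI premise/hypothesis pair
# (text, "This example is {label}") and the label with the highest entailment logit wins.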
| 43
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
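    # Worked example of the span scoring above (illustrative numbers): with start_logits=[2, 0]
    # and end_logits=[1, 3], span (0, 1) scores 2 + 3 = 5 and outranks (0, 0) with 2 + 1 = 3;
    # spans nested inside or containing an already-chosen span, such as (0, 0) and (1, 1) here,
    # are then skipped by the interval-overlap check.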
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 326
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 43
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
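    # Note: seqeval computes entity-level precision/recall/F1 here, so a prediction only counts
    # as correct when the full BIO-tagged span and its entity type both match the reference.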
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 251
|
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
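# Usage sketch (assumed, not part of the original module):
# eigen_value, eigen_vector = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
# The dominant eigenvalue of [[2, 1], [1, 2]] is 3.0, with eigenvector proportional to [1, 1].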
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 43
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
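    # Note: `scale_features` and `scale_to_features` are affine inverses of each other, mapping
    # between the MelGAN log-magnitude range [min_value, max_value] and the network range
    # (by default [-1.0, 1.0]), so scale_to_features(scale_features(x)) == x up to clipping.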
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 145
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """A fast Bloom tokenizer backed by HuggingFace's `tokenizers` library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus( self , *args , **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids( self , conversation: "Conversation") -> List[int]:
        # Concatenate each encoded turn, appending the EOS token after every turn.
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids
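A minimal usage sketch for the tokenizer above; the checkpoint name is taken from the pretrained map at the top of the file, and the printed values are illustrative rather than verified output.

# Hedged usage sketch, assuming an installed `transformers` exposing this class.
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
encoding = tokenizer("Hello, world!", return_tensors="pt")
print(encoding["input_ids"].shape)                  # (1, sequence_length)
print(tokenizer.decode(encoding["input_ids"][0]))   # round-trips to the input text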
| 43
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        # Prepare a list of PIL images from random uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
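A brief, hedged sketch of how the processor under test is used end to end; the checkpoint name is an assumption and not part of the test file.

# Hedged sketch, assuming the public CLIPSeg checkpoint name below.
from PIL import Image
import numpy as np
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values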
| 36
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
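A short usage sketch for the config above; CTRLModel is assumed importable from an installed transformers, and the small hyperparameters are arbitrary toy values.

# Hedged sketch: instantiate a tiny model from the config; values are illustrative.
from transformers import CTRLConfig, CTRLModel

config = CTRLConfig(n_layer=2, n_head=4, n_embd=128)
model = CTRLModel(config)
print(model.config.num_hidden_layers)  # 2, resolved through attribute_map -> n_layer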
| 43
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
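A hedged sketch of the observable behavior of the lazy structure above: importing the package is cheap, and the heavy submodule import is deferred until first attribute access. The transformers package path is an assumption about where this init lives.

# Hedged sketch of lazy-module behavior; module path is assumed.
import transformers.models.vit as vit_module

print(type(vit_module))            # a _LazyModule proxy, not a plain module
config_cls = vit_module.ViTConfig  # first attribute access triggers the real import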
| 10
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ])
    def get_dummy_components(self) -> dict:
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self , device , seed=0) -> dict:
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False , expected_max_diff=3e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=1e-2)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass
    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass
    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
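A hedged usage sketch mirroring the slow tests above; it assumes a CUDA device with enough memory, and the fp16 loading is an optional choice rather than something the tests require.

# Hedged sketch of running the text-to-video pipeline outside the test harness.
import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="np").frames
print(len(frames))  # a sequence of RGB frames making up the generated clip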
| 43
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad , model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir , metric):
    """Saves the best model by validation metric score."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f"val_{metric}" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback


def get_early_stopping_callback(metric , patience):
    return EarlyStopping(
        monitor=f"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self , trainer , pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self , trainer , pl_module , type_path , save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file , "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self , trainer , pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self , trainer , pl_module):
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(trainer , pl_module , "test")
    @rank_zero_only
    def on_validation_end(self , trainer , pl_module):
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
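A hedged sketch of wiring these callbacks into a Trainer; `model` and `args` are assumed to come from the surrounding training script and are not defined here.

# Hedged sketch, assuming `model` (a LightningModule) and `args.output_dir` exist.
trainer = pl.Trainer(
    max_epochs=3,
    callbacks=[
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(args.output_dir, metric="rouge2"),
        get_early_stopping_callback(metric="rouge2", patience=2),
    ],
)
trainer.fit(model)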
| 202
|
def z_function(input_str):
    """For each index, the length of the longest substring starting there that matches a prefix."""
    z_result = [0 for i in range(len(input_str) )]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(input_str) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i , z_result , s ):
    """Check if the match starting at index i can be extended by one character."""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern , input_str ):
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
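A short worked example: in "aabxaab", position 4 starts a 3-character match with the prefix, so z[4] == 3; counting Z-values of pattern + text that reach len(pattern) gives the occurrence count.

# Hand-checked example values for the functions above.
assert z_function("aabxaab") == [0, 1, 0, 0, 3, 1, 0]
# "aab" occurs twice in "aabxaab" (at offsets 0 and 4):
assert find_pattern("aab", "aabxaab") == 2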
| 43
| 0
|
def z_function(input_str):
    """For each index, the length of the longest substring starting there that matches a prefix."""
    z_result = [0 for i in range(len(input_str) )]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(input_str) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i , z_result , s ):
    """Check if the match starting at index i can be extended by one character."""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern , input_str ):
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 130
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]
    def __init__( self , notes_encoder: SpectrogramNotesEncoder , continuous_encoder: SpectrogramContEncoder , decoder: T5FilmDecoder , scheduler: DDPMScheduler , melgan , ) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps)
        return logits
@torch.no_grad()
    def __call__( self , input_tokens , generator=None , num_inference_steps=100 , return_dict=True , output_type="numpy" , callback=None , callback_steps=1 , ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device , dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True)
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel)
            logger.info("Generated segment" , i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.")
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.")
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
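A hedged usage sketch for the pipeline above; the checkpoint name and the dummy token segment are assumptions, since real inputs come from a MIDI note-sequence feature converter rather than hand-written tokens.

# Hedged sketch: one padded dummy segment of note tokens; real usage tokenizes MIDI.
from diffusers import SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
dummy_tokens = [[1, 2, 3] + [0] * 2045]  # assumed segment length; padding with 0s
output = pipe(dummy_tokens, num_inference_steps=10, output_type="mel")
print(output.audios.shape)  # (1, frames, n_dims) mel features when output_type="mel"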
| 43
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
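A minimal usage sketch of the processor above with default settings on a synthetic image.

# Hedged sketch: default CLIP preprocessing (resize shortest edge to 224, center crop).
import numpy as np
from PIL import Image

processor = CLIPImageProcessor()
image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)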
| 144
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wav2vec = HubertForCTC(config )
    else:
        hf_wav2vec = HubertModel(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec , is_finetuned )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
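A hedged command-line sketch for the script above; the script filename and all paths are hypothetical placeholders.

# Hedged invocation sketch; filename and paths are placeholders, not verified values.
# python convert_hubert_checkpoint.py \
#     --checkpoint_path /path/to/hubert_checkpoint.pt \
#     --pytorch_dump_folder_path /path/to/output_dir \
#     --not_finetuned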
| 43
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname )[0], "snapshots" ) )]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed, num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 4.1514745 ) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32 ).sum() - 49947.875 ) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(images_pil ) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed, num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.05652401) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2383808.2) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed, num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32 ).sum() - 2373516.75) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = sd_pipe(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        sd_pipe_eff, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = sd_pipe_eff.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = sd_pipe_eff(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
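# A minimal, standalone sketch of the replicate/split/shard recipe the tests
# above rely on (my illustration, not part of the test file): params are
# copied to every device, while the batch and the RNG keys are split so each
# device works on its own slice. `fake_step` is a stand-in for the pipeline.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones(4)}
inputs = jnp.arange(8.0).reshape(8, 1)  # global batch, divisible by the device count

params = replicate(params)  # adds a leading device axis to every leaf
rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
inputs = shard(inputs)  # (n_devices, batch // n_devices, 1)

@jax.pmap
def fake_step(params, rng, x):
    return x * params["w"][0] + jax.random.normal(rng, x.shape)

out = fake_step(params, rngs, inputs)
assert out.shape[0] == jax.device_count()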
| 64
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def main():
    """
    Get the image and annotation lists from the input dirs, build mosaic
    images, and save images and annotations to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    """Collect image paths and YOLO-format boxes converted to corner coordinates."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Paste four images into one mosaic and remap their boxes accordingly."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char):
    """Generate a random lowercase alphanumeric string of the given length."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
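# Quick sanity check of the quadrant coordinate math in update_image_and_anno
# (pure arithmetic, no image I/O; the numbers are made up for illustration):
# a relative box pasted into the top-right tile keeps its y extent scaled by
# scale_y, while its x extent is remapped into [scale_x, 1.0].
import math

scale_x, scale_y = 0.5, 0.5
bbox = [0, 0.2, 0.2, 0.6, 0.6]  # class, xmin, ymin, xmax, ymax (relative)
top_right = [
    bbox[0],
    scale_x + bbox[1] * (1 - scale_x),  # xmin -> 0.6
    bbox[2] * scale_y,  # ymin -> 0.1
    scale_x + bbox[3] * (1 - scale_x),  # xmax -> 0.8
    bbox[4] * scale_y,  # ymax -> 0.3
]
assert all(math.isclose(a, b) for a, b in zip(top_right, [0, 0.6, 0.1, 0.8, 0.3]))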
| 43
| 0
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
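# Worked example of the shortest-edge rule in get_expected_values above
# (standalone arithmetic, not used by the tests): a 480 x 640 (h x w) image
# resized with shortest_edge=800 scales the short side to 800 and the long
# side proportionally to int(800 * 640 / 480) = 1066 -- the (800, 1066)
# shape asserted in the slow tests below.
assert (800, int(800 * 640 / 480)) == (800, 1066)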
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowercase , '''image_std''' ) )
self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowercase , '''size''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __lowercase )
a =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowercase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __lowercase )
def SCREAMING_SNAKE_CASE ( self ) -> str:
pass
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
a =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# Initialize image_processing
a =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
a =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
a =self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
# prepare image and target
a =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
a =json.loads(f.read() )
a ={'''image_id''': 3_9769, '''annotations''': target}
# encode them
a =ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' )
a =image_processing(images=__lowercase , annotations=__lowercase , return_tensors='''pt''' )
# verify pixel values
a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowercase )
a =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowercase , atol=1E-4 ) )
# verify area
a =torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowercase ) )
# verify boxes
a =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowercase )
a =torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowercase , atol=1E-3 ) )
# verify image_id
a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowercase ) )
# verify is_crowd
a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowercase ) )
# verify class_labels
a =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowercase ) )
# verify orig_size
a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowercase ) )
# verify size
a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowercase ) )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
# prepare image, target and masks_path
a =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
a =json.loads(f.read() )
a ={'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
a =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
a =ConditionalDetrImageProcessor(format='''coco_panoptic''' )
a =image_processing(images=__lowercase , annotations=__lowercase , masks_path=__lowercase , return_tensors='''pt''' )
# verify pixel values
a =torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __lowercase )
a =torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __lowercase , atol=1E-4 ) )
# verify area
a =torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __lowercase ) )
# verify boxes
a =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __lowercase )
a =torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __lowercase , atol=1E-3 ) )
# verify image_id
a =torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __lowercase ) )
# verify is_crowd
a =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __lowercase ) )
# verify class_labels
a =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __lowercase ) )
# verify masks
a =82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __lowercase )
# verify orig_size
a =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __lowercase ) )
# verify size
a =torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __lowercase ) )
| 81
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    '''simple docstring'''

    model_type = "wav2vec2"
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :int = feat_extract_norm
__UpperCamelCase :Tuple = feat_extract_activation
__UpperCamelCase :Union[str, Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :int = list(__lowercase)
__UpperCamelCase :List[Any] = conv_bias
__UpperCamelCase :Optional[int] = num_conv_pos_embeddings
__UpperCamelCase :Dict = num_conv_pos_embedding_groups
__UpperCamelCase :Any = len(self.conv_dim)
__UpperCamelCase :List[str] = num_hidden_layers
__UpperCamelCase :int = intermediate_size
__UpperCamelCase :str = hidden_act
__UpperCamelCase :Any = num_attention_heads
__UpperCamelCase :int = hidden_dropout
__UpperCamelCase :Tuple = attention_dropout
__UpperCamelCase :List[str] = activation_dropout
__UpperCamelCase :Optional[Any] = feat_proj_dropout
__UpperCamelCase :Any = final_dropout
__UpperCamelCase :Any = layerdrop
__UpperCamelCase :str = layer_norm_eps
__UpperCamelCase :Optional[Any] = initializer_range
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :str = do_stable_layer_norm
__UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :List[Any] = apply_spec_augment
__UpperCamelCase :Tuple = mask_time_prob
__UpperCamelCase :int = mask_time_length
__UpperCamelCase :Dict = mask_time_min_masks
__UpperCamelCase :str = mask_feature_prob
__UpperCamelCase :List[str] = mask_feature_length
__UpperCamelCase :Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :List[Any] = num_codevector_groups
__UpperCamelCase :Tuple = contrastive_logits_temperature
__UpperCamelCase :Optional[int] = feat_quantizer_dropout
__UpperCamelCase :Optional[int] = num_negatives
__UpperCamelCase :List[Any] = codevector_dim
__UpperCamelCase :str = proj_codevector_dim
__UpperCamelCase :List[str] = diversity_loss_weight
# ctc loss
__UpperCamelCase :Tuple = ctc_loss_reduction
__UpperCamelCase :Tuple = ctc_zero_infinity
# adapter
__UpperCamelCase :List[str] = add_adapter
__UpperCamelCase :Tuple = adapter_kernel_size
__UpperCamelCase :str = adapter_stride
__UpperCamelCase :Tuple = num_adapter_layers
__UpperCamelCase :Tuple = output_hidden_size or hidden_size
__UpperCamelCase :Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase :Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase :Optional[int] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :str = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
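# Illustration of the property above (standalone arithmetic, reusing this
# module's functools/operator imports): with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples by 5 * 2**6 = 320,
# i.e. one output frame per 320 waveform samples (20 ms at 16 kHz).
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320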
| 43
| 0
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
a__: List[str] = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, 'README.md')
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['default'], key), getattr(expected_dataset_infos['default'], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 193
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[Any] = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
__UpperCamelCase :Optional[int] = do_resize
__UpperCamelCase :Any = do_rescale
__UpperCamelCase :str = size_divisor
__UpperCamelCase :Dict = resample
super().__init__(**__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
__UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
# Rounds the height and width down to the closest multiple of size_divisor
__UpperCamelCase :List[Any] = height // size_divisor * size_divisor
__UpperCamelCase :List[str] = width // size_divisor * size_divisor
__UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
return image
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
__UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
__UpperCamelCase :List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''')
__UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
if not valid_images(__lowercase):
raise ValueError('''Invalid image(s)''')
# All transformations expect numpy arrays.
__UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
if do_resize:
__UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
if do_rescale:
__UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
__UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
__UpperCamelCase :int = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase)
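# Worked example of the size_divisor rounding in the resize method above
# (standalone arithmetic): with size_divisor=32, a 517 x 1044 image is
# resized down to 512 x 1024, the closest multiples of 32 that do not exceed
# the original dimensions.
assert (517 // 32 * 32, 1044 // 32 * 32) == (512, 1024)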
| 43
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : Any , lowercase : Any , lowercase : Any , lowercase : Union[str, Any] ) -> List[str]:
for attribute in key.split("." ):
__snake_case : str = getattr(lowercase , lowercase )
if weight_type is not None:
__snake_case : Any = getattr(lowercase , lowercase ).shape
else:
__snake_case : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__snake_case : str = value
elif weight_type == "weight_g":
__snake_case : List[str] = value
elif weight_type == "weight_v":
__snake_case : str = value
elif weight_type == "bias":
__snake_case : Union[str, Any] = value
else:
__snake_case : str = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase__( lowercase : Any , lowercase : Optional[Any] , lowercase : Optional[int] ) -> int:
__snake_case : List[Any] = []
__snake_case : int = fairseq_model.state_dict()
__snake_case : List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == "group" , )
__snake_case : List[str] = True
else:
for key, mapped_key in MAPPING.items():
__snake_case : Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
__snake_case : Optional[Any] = True
if "*" in mapped_key:
__snake_case : List[str] = name.split(lowercase )[0].split("." )[-2]
__snake_case : Optional[int] = mapped_key.replace("*" , lowercase )
if "weight_g" in name:
__snake_case : int = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[Any] = '''weight_v'''
elif "weight" in name:
__snake_case : Dict = '''weight'''
elif "bias" in name:
__snake_case : Dict = '''bias'''
else:
__snake_case : Dict = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase__( lowercase : int , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : Dict , lowercase : Any ) -> Union[str, Any]:
__snake_case : Tuple = full_name.split("conv_layers." )[-1]
__snake_case : Optional[int] = name.split("." )
__snake_case : str = int(items[0] )
__snake_case : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__snake_case : Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__snake_case : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__snake_case : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : Dict , lowercase : Dict=None , lowercase : Tuple=None , lowercase : Optional[Any]=True ) -> Optional[int]:
if config_path is not None:
__snake_case : Tuple = HubertConfig.from_pretrained(lowercase )
else:
__snake_case : Optional[int] = HubertConfig()
if is_finetuned:
if dict_path:
__snake_case : Optional[int] = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__snake_case : Optional[int] = target_dict.pad_index
__snake_case : Dict = target_dict.bos_index
__snake_case : str = target_dict.eos_index
__snake_case : Dict = len(target_dict.symbols )
__snake_case : List[Any] = os.path.join(lowercase , "vocab.json" )
if not os.path.isdir(lowercase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
with open(lowercase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , lowercase )
            __snake_case : Optional[int] = Wav2Vec2CTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowercase , )
__snake_case : Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False
            __snake_case : Any = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
            __snake_case : Any = Wav2Vec2Processor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
__snake_case : List[str] = HubertForCTC(lowercase )
else:
__snake_case : str = HubertModel(lowercase )
if is_finetuned:
__snake_case : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__snake_case : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__snake_case : Dict = model[0].eval()
recursively_load_weights(lowercase , lowercase , lowercase )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_UpperCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
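# Example invocation (hypothetical paths and script name, shown for
# illustration only):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned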
| 326
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells):
    """Compute the next Game of Life generation for a 2D grid of 0/1 cells."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells, frames):
    """Render a sequence of Game of Life generations as PIL images."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('RGB', (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
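# Quick sanity check of the update rule (illustrative, safe to delete): a
# vertical blinker flips to a horizontal one after a single generation.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]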
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
| 43
| 0
|
'''simple docstring'''
class EditDistance:
    '''
    Use : solver = EditDistance()
          result = solver.min_dist_top_down(first_string, second_string)
    '''

    def __init__(self):
        self.word1 = ''
        self.word2 = ''
        self.dp = []

    def __min_dist_top_down_dp(self, m, n):
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 251
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = R'''\w+[.]\d+'''
__UpperCamelCase :List[str] = re.findall(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for pat in pats:
__UpperCamelCase :int = key.replace(SCREAMING_SNAKE_CASE , '''_'''.join(pat.split('''.''' ) ) )
return key
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__UpperCamelCase :str = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__UpperCamelCase :Any = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__UpperCamelCase :str = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__UpperCamelCase :List[str] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
__UpperCamelCase :List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__UpperCamelCase :List[str] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
__UpperCamelCase :Any = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__UpperCamelCase :int = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__UpperCamelCase :int = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=42 ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__UpperCamelCase :str = flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE ) )
__UpperCamelCase :int = flatten_dict(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__UpperCamelCase :List[Any] = rename_key(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
__UpperCamelCase , __UpperCamelCase :Any = rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
__UpperCamelCase :str = jnp.asarray(SCREAMING_SNAKE_CASE )
return unflatten_dict(SCREAMING_SNAKE_CASE )
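# A tiny standalone check of the two reshape rules used above (my
# illustration, plain numpy): PyTorch stores Linear weights as (out, in)
# while Flax Dense kernels are (in, out), and PyTorch Conv2d weights
# (out, in, kh, kw) map to Flax (kh, kw, in, out).
import numpy as np

pt_linear = np.zeros((8, 4))  # (out_features, in_features)
assert pt_linear.T.shape == (4, 8)  # Flax kernel: (in_features, out_features)

pt_conv = np.zeros((16, 3, 3, 3))  # (out, in, kh, kw)
assert pt_conv.transpose(2, 3, 1, 0).shape == (3, 3, 3, 16)  # (kh, kw, in, out)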
| 43
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple = None , lowerCAmelCase__ : str = True , lowerCAmelCase__ : Any = False , lowerCAmelCase__ : int = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : Tuple = 0 , lowerCAmelCase__ : List[str] = None , lowerCAmelCase__ : Tuple = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : Tuple = False , lowerCAmelCase__ : Optional[Any] = False , lowerCAmelCase__ : Optional[int] = False , lowerCAmelCase__ : str = False , lowerCAmelCase__ : Dict = True , lowerCAmelCase__ : List[Any] = None , **lowerCAmelCase__ : str , ) -> BatchEncoding:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.tokenizer(
text=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_token_type_ids=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , )
# add pixel_values + pixel_mask
_UpperCAmelCase : str = self.image_processor(__lowercase , return_tensors=__lowercase )
encoding.update(__lowercase )
return encoding
def _lowerCAmelCase ( self : List[Any] , *lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : int ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def _lowerCAmelCase ( self : Optional[Any] , *lowerCAmelCase__ : Optional[Any] , **lowerCAmelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def _lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : int = self.tokenizer.model_input_names
_UpperCAmelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowercase , )
return self.image_processor_class
@property
def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowercase , )
return self.image_processor
| 145
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = AlbertConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(f"""Building PyTorch model from configuration: {config}""" )
__UpperCamelCase :List[str] = AlbertForPreTraining(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_albert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 43
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
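# Minimal sketch of the gating logic above with the distributed state mocked
# out (PartialState needs a launched run, so the rank is passed explicitly):
def _tqdm_disabled(main_process_only, rank):
    disable = False
    if main_process_only:
        disable = rank != 0
    return disable

assert _tqdm_disabled(True, rank=0) is False   # main process keeps its bar
assert _tqdm_disabled(True, rank=3) is True    # worker processes are silenced
assert _tqdm_disabled(False, rank=3) is False  # every process shows a bar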
| 36
|
import math
import qiskit
def quantum_full_adder(input_a: int = 1, input_b: int = 1, carry_in: int = 1):
    """Build and simulate a quantum full adder; returns the measurement counts."""
    if isinstance(input_a, str) or isinstance(input_b, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
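# Classical cross-check (illustrative helper, not part of the original module):
# for definite 0/1 inputs the dominant measured state should match the classical
# full adder's (carry_out, sum) pair.
def classical_full_adder(input_a: int, input_b: int, carry_in: int) -> tuple[int, int]:
    total = input_a + input_b + carry_in
    return total // 2, total % 2  # (carry_out, sum)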
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
import re
def split_input(str_: str) -> list:
    """Split on any character that is not a letter, digit, or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition around the element at `left_index`."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
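    # Deterministic sanity check (illustrative):
    #   nums = [5, 1, 4, 2, 3]
    #   quick_sort_random(nums, 0, len(nums))
    #   assert nums == [1, 2, 3, 4, 5]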
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs, ):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs, ):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args, ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1_024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50_358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """Find the d <= `digit` for which numerator/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
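    # Worked example (illustrative): among d <= 10, 1/7 has the longest recurring
    # cycle (0.(142857), length 6), so solution(1, 10) returns 7.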
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification via NLI entailment scores."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
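# Usage sketch (Transformers agents tool API; the call pattern below is an assumption):
#   tool = TextClassificationTool()
#   tool("This is a super nice API!", labels=["positive", "negative"])  # -> "positive"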
import argparse
import json
from tqdm import tqdm
def main():
    """Split raw DPR training data into an evaluation-set file and a gold-data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
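    # Example invocation (output file names are hypothetical):
    #   python parse_dpr_relevance_data.py \
    #       --src_path biencoder-nq-dev.json \
    #       --evaluation_set dev.questions \
    #       --gold_data_path dev.gold_data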
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Run one prompt through Stable Diffusion v1.1-v1.4 and return all four images."""

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
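# Usage sketch (community-pipeline loading mechanism is an assumption; weights
# for all four checkpoints are downloaded on first use):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison")
#   images = pipe("an astronaut riding a horse").images  # one image per checkpoint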
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by the number of positions matching the target."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Randomly replace one gene with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Breed (and mutate) children from `parent_1` and random partners."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one exactly matches `target`."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
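    # Quick sanity check (illustrative): a perfect candidate scores one point per
    # matching position, so evaluate("Hello", "Hello") returns ("Hello", 5.0).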
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """Wraps a ViT image processor and three tokenizers (char / BPE / wordpiece) for MGP-STR."""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
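# Usage sketch (checkpoint name is an assumption from the MGP-STR model card):
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   inputs = processor(images=image, return_tensors="pt")  # pixel values for the model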
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build MRPC train/eval dataloaders using the given model's tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train on MRPC and assert a minimum accuracy if a lower bound is given."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
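    # Example launch (flags and script name are illustrative; requires an
    # Accelerate config with a DeepSpeed plugin):
    #   accelerate launch --use_deepspeed --num_processes 2 test_performance.py \
    #       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./results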
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Iterative segment tree over `arr`, combining values with `fnc`."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Check every (i, j) segment against a plain `reduce` over the slice."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
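    # Example queries after the updates above (0-indexed, inclusive bounds):
    #   min_segment_tree.query(0, 2)  -> 2    (min of [7, 2, 6])
    #   sum_segment_tree.query(0, 2)  -> 15   (7 + 2 + 6)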
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1_024, encoder_layers=6, encoder_ffn_dim=1_024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1_024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
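# Usage sketch (instantiation is illustrative; a config-built model is randomly
# initialized, so nothing is downloaded):
#   from transformers import DeformableDetrModel
#   config = DeformableDetrConfig(num_queries=100)
#   model = DeformableDetrModel(config)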
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')

MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted attribute path (e.g. "encoder.layers.0.conv") down to the target module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    # Every supported weight type ("weight", "weight_g", "weight_v", "bias", the batch-norm
    # statistics and the LSTM weight/bias parameters) is assigned the same way, so a single
    # getattr covers the whole original if/elif chain.
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}" )
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored" )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}" )
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}" )
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
a__: Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
a__: Optional[int] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
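# Example invocation (a sketch; the script filename, checkpoint path and output
# folder below are hypothetical placeholders, while the flags are the ones
# defined by the argument parser above):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted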
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[Any] = """facebook/bart-large-mnli"""
a__ : int = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
a__ : Optional[Any] = """text_classifier"""
a__ : Any = AutoTokenizer
a__ : str = AutoModelForSequenceClassification
a__ : str = ["""text""", ["""text"""]]
a__ : Optional[int] = ["""text"""]
    def setup(self) -> None:
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
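# Minimal usage sketch for the tool above ("TextClassificationTool" is a
# stand-in for the class name, and the example text/labels are illustrative):
#
#   tool = TextClassificationTool()
#   tool.setup()
#   encoded = tool.encode("I loved this movie", ["positive", "negative"])
#   print(tool.decode(tool.model(**encoded)))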
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''spiece.model'''}
_UpperCamelCase = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
_UpperCamelCase = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
_UpperCamelCase = '''▁'''
class _lowerCamelCase ( UpperCAmelCase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple =VOCAB_FILES_NAMES
UpperCAmelCase_ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model )

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self, d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self, inputs ) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"" ).replace("''", "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self, text ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text, out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces

    def _convert_token_to_id( self, token ) -> int:
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self, index ) -> str:
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self, tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
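# Minimal usage sketch (assumes a local SentencePiece file; "spiece.model" is a
# placeholder path and "AlbertTokenizer" a stand-in for the class defined above):
#
#   tokenizer = AlbertTokenizer(vocab_file="spiece.model")
#   pieces = tokenizer._tokenize("The quick brown fox.")
#   text = tokenizer.convert_tokens_to_string(pieces)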
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
    a__ : int = StableUnCLIPImg2ImgPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__ : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a__ : int = frozenset([] )
def UpperCamelCase__ ( self) -> Tuple:
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
__UpperCamelCase :Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32)
torch.manual_seed(0)
__UpperCamelCase :Union[str, Any] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__lowercase , projection_dim=__lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
__UpperCamelCase :str = StableUnCLIPImageNormalizer(embedding_dim=__lowercase)
__UpperCamelCase :Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
torch.manual_seed(0)
__UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
torch.manual_seed(0)
__UpperCamelCase :Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
torch.manual_seed(0)
        __UpperCamelCase :List[Any] = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowercase , layers_per_block=1 , upcast_attention=__lowercase , use_linear_projection=__lowercase , )
torch.manual_seed(0)
__UpperCamelCase :Tuple = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
torch.manual_seed(0)
__UpperCamelCase :List[str] = AutoencoderKL()
__UpperCamelCase :Tuple = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
    def get_dummy_inputs( self, device, seed=0, pil_image=True ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1 )
            input_image = input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
__UpperCamelCase :Optional[Any] = sd_pipe.to(__lowercase)
sd_pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowercase)
inputs.update({'''image_embeds''': None})
__UpperCamelCase :Any = sd_pipe(**__lowercase).images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase :List[Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__lowercase)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase__ ( self) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__lowercase)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCamelCase :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :List[Any] = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.float16)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Dict = pipe(__lowercase , '''anime turtle''' , generator=__lowercase , output_type='''np''')
__UpperCamelCase :Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCamelCase :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
        __UpperCamelCase :Any = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCamelCase :Optional[int] = pipe(__lowercase , '''anime turtle''' , generator=__lowercase , output_type='''np''')
__UpperCamelCase :List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        __UpperCamelCase :List[Any] = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16)
__UpperCamelCase :Union[str, Any] = pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :Optional[Any] = pipe(
__lowercase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
__UpperCamelCase :int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
def factorial(num: int) -> int:
    """Return num!"""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    result = factorial(num)
    return split_and_add(result)
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
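# Sanity check: 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) == 27; for the default num = 100 the digit sum is 648.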
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Power iteration: repeatedly multiply and renormalize to approximate the
    largest eigenvalue (and its eigenvector) of a symmetric / Hermitian matrix."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
__a = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : List[str] = ["""input_ids""", """attention_mask"""]
a__ : int = None
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus( self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
                ''' pretokenized inputs.''')
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary( self, save_directory, filename_prefix = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids( self, conversation) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
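# Minimal usage sketch ("BloomTokenizerFast" is a stand-in for the class defined
# above; the Hub model id is a real BLOOM checkpoint):
#
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tokenizer("Hello world")["input_ids"]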
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase_ ( UpperCAmelCase_):
lowerCamelCase__ = """bloom"""
lowerCamelCase__ = ["""past_key_values"""]
lowerCamelCase__ = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
    def __init__( self, vocab_size=250_880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs, ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class UpperCAmelCase_ ( UpperCAmelCase_):
lowerCamelCase__ = version.parse('1.12')
def __init__( self, __a, __a = "default", __a = None, __a = False, ):
'''simple docstring'''
super().__init__(__lowercase, task=__lowercase, patching_specs=__lowercase, use_past=__lowercase)
if not getattr(self._config, "pad_token_id", __lowercase):
# TODO: how to do that better?
_lowerCAmelCase : List[Any] = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs( self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
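# Sketch of typical usage with the ONNX exporter ("BloomConfig" and
# "BloomOnnxConfig" are stand-ins for the two classes defined above, and
# `tokenizer` is assumed to be a matching tokenizer already in scope):
#
#   config = BloomConfig()
#   onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)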
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : str = """ctrl"""
a__ : Dict = ["""past_key_values"""]
a__ : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self, vocab_size=246_534, n_positions=256, n_embd=1_280, dff=8_192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs, ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
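# Minimal usage sketch ("CTRLConfig" is a stand-in for the class defined above):
#
#   config = CTRLConfig(n_layer=24)
#   assert config.num_hidden_layers == 24  # resolved through the attribute_map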
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : str = TextToVideoSDPipeline
a__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
a__ : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a__ : int = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def UpperCamelCase__ ( self) -> Optional[Any]:
torch.manual_seed(0)
        __UpperCamelCase :str = UNet2DConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
__UpperCamelCase :Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0)
__UpperCamelCase :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
__UpperCamelCase :Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
__UpperCamelCase :Optional[Any] = CLIPTextModel(__lowercase)
__UpperCamelCase :Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
__UpperCamelCase :Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[int]:
if str(__lowercase).startswith('''mps'''):
__UpperCamelCase :List[Any] = torch.manual_seed(__lowercase)
else:
__UpperCamelCase :Tuple = torch.Generator(device=__lowercase).manual_seed(__lowercase)
__UpperCamelCase :Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase :Optional[int] = self.get_dummy_components()
__UpperCamelCase :Dict = TextToVideoSDPipeline(**__lowercase)
__UpperCamelCase :Any = sd_pipe.to(__lowercase)
sd_pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Optional[Any] = self.get_dummy_inputs(__lowercase)
__UpperCamelCase :int = '''np'''
__UpperCamelCase :List[str] = sd_pipe(**__lowercase).frames
__UpperCamelCase :Optional[Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__UpperCamelCase :str = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase__ ( self) -> Tuple:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=3E-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase__ ( self) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase , expected_max_diff=1E-2)
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
def UpperCamelCase__ ( self) -> Union[str, Any]:
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''')
def UpperCamelCase__ ( self) -> str:
pass
def UpperCamelCase__ ( self) -> List[str]:
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''')
__UpperCamelCase :List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
__UpperCamelCase :Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__UpperCamelCase :str = pipe.to('''cuda''')
__UpperCamelCase :Optional[Any] = '''Spiderman is surfing'''
__UpperCamelCase :Union[str, Any] = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=25 , output_type='''pt''').frames
__UpperCamelCase :Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''')
__UpperCamelCase :Union[str, Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''')
__UpperCamelCase :str = pipe.to('''cuda''')
__UpperCamelCase :Union[str, Any] = '''Spiderman is surfing'''
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :List[Any] = pipe(__lowercase , generator=__lowercase , num_inference_steps=2 , output_type='''pt''').frames
__UpperCamelCase :Optional[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video).mean() < 5E-2
"""simple docstring"""
import requests
_A : List[str] = """YOUR API KEY"""
def __magic_name__ ( __snake_case : List[str] , __snake_case : Optional[int] = giphy_api_key ) -> Dict:
lowercase : Any = '''+'''.join(query.split() )
lowercase : Union[str, Any] = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
lowercase : str = requests.get(__snake_case ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
def z_function(input_str: str):
    """For each index i, z_result[i] is the length of the longest substring
    starting at i that is also a prefix of the whole string."""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i, z_result, s):
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
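# Worked example: find_pattern("abr", "abracadabra") scans
# z_function("abrabracadabra"); the Z-value is >= len("abr") at exactly the two
# positions where "abr" occurs in the text, so the call returns 2.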
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = 256
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = ["""melgan"""]
    def __init__( self, notes_encoder, continuous_encoder, decoder, scheduler, melgan, ) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    def scale_features( self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features( self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode( self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
@torch.no_grad()
def __call__( self , __lowercase , __lowercase = None , __lowercase = 100 , __lowercase = True , __lowercase = "numpy" , __lowercase = None , __lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowercase , __lowercase) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__lowercase)}.""")
__UpperCamelCase :Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
__UpperCamelCase :Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa)
__UpperCamelCase :Union[str, Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
for i, encoder_input_tokens in enumerate(__lowercase):
if i == 0:
__UpperCamelCase :int = torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device , dtype=self.decoder.dtype)
# The first chunk has no previous context.
__UpperCamelCase :int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowercase , device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__UpperCamelCase :Tuple = ones
__UpperCamelCase :Optional[Any] = self.scale_features(
__lowercase , output_range=[-1.0, 1.0] , clip=__lowercase)
__UpperCamelCase :int = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowercase , continuous_mask=__lowercase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__UpperCamelCase :int = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowercase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowercase)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
__UpperCamelCase :Optional[int] = self.decode(
encodings_and_masks=__lowercase , input_tokens=__lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__UpperCamelCase :int = self.scheduler.step(__lowercase , __lowercase , __lowercase , generator=__lowercase).prev_sample
__UpperCamelCase :Tuple = self.scale_to_features(__lowercase , input_range=[-1.0, 1.0])
__UpperCamelCase :List[Any] = mel[:1]
__UpperCamelCase :Optional[Any] = mel.cpu().float().numpy()
__UpperCamelCase :Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowercase , __lowercase)
logger.info('''Generated segment''' , __lowercase)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
                '''Cannot return output in \'numpy\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
                '''Cannot return output in \'numpy\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
if output_type == "numpy":
__UpperCamelCase :Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
else:
__UpperCamelCase :List[str] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowercase)
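# A standalone sketch of the two scaling helpers above, with the MelGAN constants from
# __init__ hard-coded; round-tripping through "scale to [-1, 1]" and back recovers the
# input up to float32 noise.
import math
import torch

MIN_VALUE = math.log(1e-5)  # matches the MelGAN training floor set in __init__
MAX_VALUE = 4.0

def scale_features(features, output_range=(-1.0, 1.0)):
    min_out, max_out = output_range
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)  # -> [0, 1]
    return zero_one * (max_out - min_out) + min_out  # -> [min_out, max_out]

def scale_to_features(outputs, input_range=(-1.0, 1.0)):
    min_out, max_out = input_range
    zero_one = (outputs - min_out) / (max_out - min_out)  # -> [0, 1]
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE  # -> [MIN_VALUE, MAX_VALUE]

mel = torch.linspace(MIN_VALUE, MAX_VALUE, steps=8)
assert torch.allclose(scale_to_features(scale_features(mel)), mel, atol=1e-4)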
| 43
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _snake_case ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple ) -> str:
for i in range(lowerCamelCase__ ):
lowerCamelCase_ : int =dataset[i]
@get_duration
def _snake_case ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int ) -> List[Any]:
for i in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ ):
lowerCamelCase_ : str =dataset[i : i + batch_size]
@get_duration
def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ) -> Dict:
with dataset.formatted_as(type=lowerCamelCase__ ):
for i in range(lowerCamelCase__ ):
lowerCamelCase_ : List[str] =dataset[i]
@get_duration
def _snake_case ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ) -> Tuple:
with dataset.formatted_as(type=lowerCamelCase__ ):
for i in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ : List[Any] =dataset[i : i + batch_size]
def _snake_case ( ) -> Optional[Any]:
lowerCamelCase_ : Optional[Any] ={'''num examples''': SPEED_TEST_N_EXAMPLES}
lowerCamelCase_ : Optional[Any] =[
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
lowerCamelCase_ : List[str] =[
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
lowerCamelCase_ : Any =datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
lowerCamelCase_ : int =generate_example_dataset(
os.path.join(lowerCamelCase__ , "dataset.arrow" ) , lowerCamelCase__ , num_examples=lowerCamelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCamelCase__ ) )
lowerCamelCase_ : Union[str, Any] =func(lowerCamelCase__ , **lowerCamelCase__ )
print("shuffling dataset" )
lowerCamelCase_ : Union[str, Any] =dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCamelCase__ ) )
lowerCamelCase_ : List[Any] =func(
lowerCamelCase__ , **lowerCamelCase__ )
with open(lowerCamelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCamelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
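    # `get_duration` comes from a local `utils` module that is not shown in this file; the
    # decorator below is a hypothetical sketch of what it could look like (name suffixed to
    # avoid shadowing the real import), so the benchmark functions above read as "return
    # elapsed seconds rather than the function's result".
    import functools
    import time

    def get_duration_sketch(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            func(*args, **kwargs)
            return time.perf_counter() - start  # seconds elapsed, result is discarded
        return wrapper

    @get_duration_sketch
    def busy_loop(n):
        for _ in range(n):
            pass

    print(f"busy_loop(10**6) took {busy_loop(10**6):.4f}s")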
| 144
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for attribute in key.split('''.''' ):
__UpperCamelCase :str = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
__UpperCamelCase :Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
__UpperCamelCase :Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__UpperCamelCase :str = value
elif weight_type == "weight_g":
__UpperCamelCase :List[str] = value
elif weight_type == "weight_v":
__UpperCamelCase :str = value
elif weight_type == "bias":
__UpperCamelCase :Union[str, Any] = value
else:
__UpperCamelCase :str = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = []
__UpperCamelCase :int = fairseq_model.state_dict()
__UpperCamelCase :List[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase :List[Any] = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
__UpperCamelCase :List[str] = True
else:
for key, mapped_key in MAPPING.items():
__UpperCamelCase :Dict = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
__UpperCamelCase :Optional[Any] = True
if "*" in mapped_key:
__UpperCamelCase :List[str] = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
__UpperCamelCase :Optional[int] = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__UpperCamelCase :int = '''weight_g'''
elif "weight_v" in name:
__UpperCamelCase :List[Any] = '''weight_v'''
elif "weight" in name:
__UpperCamelCase :Dict = '''weight'''
elif "bias" in name:
__UpperCamelCase :Dict = '''bias'''
else:
__UpperCamelCase :Dict = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Tuple = full_name.split('''conv_layers.''' )[-1]
__UpperCamelCase :Optional[int] = name.split('''.''' )
__UpperCamelCase :str = int(items[0] )
__UpperCamelCase :List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__UpperCamelCase :Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__UpperCamelCase :Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__UpperCamelCase :int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__UpperCamelCase :Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
if config_path is not None:
__UpperCamelCase :Tuple = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase :Optional[int] = HubertConfig()
if is_finetuned:
if dict_path:
__UpperCamelCase :Optional[int] = Dictionary.load(SCREAMING_SNAKE_CASE )
            # important: change bos & pad token id since the CTC symbol is <pad> and
            # not <s> as in fairseq
__UpperCamelCase :Optional[int] = target_dict.pad_index
__UpperCamelCase :Dict = target_dict.bos_index
__UpperCamelCase :str = target_dict.eos_index
__UpperCamelCase :Dict = len(target_dict.symbols )
__UpperCamelCase :List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE , )
__UpperCamelCase :Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False
__UpperCamelCase :Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
__UpperCamelCase :Any = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
__UpperCamelCase :str = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__UpperCamelCase :Dict = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
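    # A toy demo (SimpleNamespace tree, not a real model) of the dotted-key traversal that
    # set_recursively performs above: `for attribute in key.split('.')` walks nested
    # attributes down to the tensor being overwritten.
    from types import SimpleNamespace

    demo_root = SimpleNamespace(encoder=SimpleNamespace(layer_norm=SimpleNamespace(weight=[1.0, 2.0])))
    node = demo_root
    for attribute in "encoder.layer_norm.weight".split("."):
        node = getattr(node, attribute)
    assert node == [1.0, 2.0]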
| 43
| 0
|
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
lowercase__ = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self: int, a_: str, a_: Tuple, a_: Dict = None, a_: str = 50_257, a_: Tuple = 1_024, a_: int = 768, a_: Dict = 12, a_: List[Any] = 12, a_: Tuple = None, a_: int = "gelu_new", a_: Union[str, Any] = 0.1, a_: int = 0.1, a_: Optional[int] = 0.1, a_: int = 1E-5, a_: Optional[Any] = 0.02, a_: List[Any] = True, a_: str = True, a_: Optional[int] = False, a_: Any = False, ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
_snake_case : int = prefix_inner_dim
_snake_case : List[Any] = prefix_hidden_dim
_snake_case : int = (
nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_snake_case : Tuple = (
nn.Linear(self.prefix_hidden_dim, __lowercase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
_snake_case : Union[str, Any] = GPTaConfig(
vocab_size=__lowercase, n_positions=__lowercase, n_embd=__lowercase, n_layer=__lowercase, n_head=__lowercase, n_inner=__lowercase, activation_function=__lowercase, resid_pdrop=__lowercase, embd_pdrop=__lowercase, attn_pdrop=__lowercase, layer_norm_epsilon=__lowercase, initializer_range=__lowercase, scale_attn_weights=__lowercase, use_cache=__lowercase, scale_attn_by_inverse_layer_idx=__lowercase, reorder_and_upcast_attn=__lowercase, )
_snake_case : List[str] = GPTaLMHeadModel(__lowercase )
def UpperCamelCase_ ( self: Tuple, a_: int, a_: int, a_: str = None, a_: Any = None, ):
'''simple docstring'''
_snake_case : Any = self.transformer.transformer.wte(__lowercase )
_snake_case : Union[str, Any] = self.encode_prefix(__lowercase )
_snake_case : List[str] = self.decode_prefix(__lowercase )
_snake_case : str = torch.cat((prefix_embeds, embedding_text), dim=1 )
if labels is not None:
_snake_case : List[Any] = self.get_dummy_token(input_ids.shape[0], input_ids.device )
_snake_case : int = torch.cat((dummy_token, input_ids), dim=1 )
_snake_case : int = self.transformer(inputs_embeds=__lowercase, labels=__lowercase, attention_mask=__lowercase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCamelCase_ ( self: Tuple, a_: List[Any], a_: Dict ):
'''simple docstring'''
return torch.zeros(__lowercase, self.prefix_length, dtype=torch.intaa, device=__lowercase )
def UpperCamelCase_ ( self: Dict, a_: str ):
'''simple docstring'''
return self.encode_prefix(__lowercase )
@torch.no_grad()
def UpperCamelCase_ ( self: List[str], a_: Optional[Any], a_: Union[str, Any], a_: List[str] ):
'''simple docstring'''
_snake_case : Tuple = torch.split(__lowercase, 1, dim=0 )
_snake_case : Tuple = []
_snake_case : Union[str, Any] = []
for feature in features:
_snake_case : Optional[Any] = self.decode_prefix(feature.to(__lowercase ) ) # back to the clip feature
# Only support beam search for now
_snake_case : List[str] = self.generate_beam(
input_embeds=__lowercase, device=__lowercase, eos_token_id=__lowercase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
_snake_case : Optional[Any] = torch.stack(__lowercase )
_snake_case : str = torch.stack(__lowercase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCamelCase_ ( self: Dict, a_: List[str]=None, a_: List[str]=None, a_: Any=None, a_: Optional[Any] = 5, a_: Union[str, Any] = 67, a_: str = 1.0, a_: Union[str, Any] = None, ):
'''simple docstring'''
_snake_case : Optional[Any] = eos_token_id
_snake_case : str = None
_snake_case : Optional[int] = None
_snake_case : List[Any] = torch.ones(__lowercase, device=__lowercase, dtype=torch.int )
_snake_case : List[str] = torch.zeros(__lowercase, device=__lowercase, dtype=torch.bool )
if input_embeds is not None:
_snake_case : Dict = input_embeds
else:
_snake_case : int = self.transformer.transformer.wte(__lowercase )
for i in range(__lowercase ):
_snake_case : List[str] = self.transformer(inputs_embeds=__lowercase )
_snake_case : List[str] = outputs.logits
_snake_case : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_snake_case : List[str] = logits.softmax(-1 ).log()
if scores is None:
_snake_case : Optional[Any] = logits.topk(__lowercase, -1 )
_snake_case : List[str] = generated.expand(__lowercase, *generated.shape[1:] )
_snake_case : Optional[int] = next_tokens.permute(1, 0 ), scores.squeeze(0 )
if tokens is None:
_snake_case : str = next_tokens
else:
_snake_case : List[Any] = tokens.expand(__lowercase, *tokens.shape[1:] )
_snake_case : List[str] = torch.cat((tokens, next_tokens), dim=1 )
else:
_snake_case : Dict = -float(np.inf )
_snake_case : Tuple = 0
_snake_case : List[str] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_snake_case : Optional[Any] = scores_sum / seq_lengths[:, None]
_snake_case : Optional[Any] = scores_sum_average.view(-1 ).topk(__lowercase, -1 )
_snake_case : str = next_tokens // scores_sum.shape[1]
_snake_case : Tuple = seq_lengths[next_tokens_source]
_snake_case : str = next_tokens % scores_sum.shape[1]
_snake_case : int = next_tokens.unsqueeze(1 )
_snake_case : Any = tokens[next_tokens_source]
_snake_case : Optional[int] = torch.cat((tokens, next_tokens), dim=1 )
_snake_case : Optional[Any] = generated[next_tokens_source]
_snake_case : str = scores_sum_average * seq_lengths
_snake_case : Union[str, Any] = is_stopped[next_tokens_source]
_snake_case : Optional[int] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0], 1, -1 )
_snake_case : int = torch.cat((generated, next_token_embed), dim=1 )
_snake_case : List[str] = is_stopped + next_tokens.eq(__lowercase ).squeeze()
if is_stopped.all():
break
_snake_case : Any = scores / seq_lengths
_snake_case : Optional[int] = scores.argsort(descending=__lowercase )
# tokens tensors are already padded to max_seq_length
_snake_case : int = [tokens[i] for i in order]
_snake_case : str = torch.stack(__lowercase, dim=0 )
_snake_case : List[str] = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype )
return output_texts, seq_lengths
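# A toy illustration (made-up sizes) of the prefix projection wiring above: when
# prefix_hidden_dim is set, encode/decode map prefix_inner_dim -> prefix_hidden_dim ->
# n_embd (otherwise both are nn.Identity), and the result is concatenated with the token
# embeddings along the sequence dimension.
import torch
from torch import nn

prefix_length, prefix_inner_dim, prefix_hidden_dim, n_embd = 10, 512, 256, 768
encode_prefix = nn.Linear(prefix_inner_dim, prefix_hidden_dim)
decode_prefix = nn.Linear(prefix_hidden_dim, n_embd)

prefix = torch.randn(2, prefix_length, prefix_inner_dim)  # (batch, prefix_len, inner_dim)
embedding_text = torch.randn(2, 7, n_embd)  # (batch, seq_len, n_embd)
prefix_embeds = decode_prefix(encode_prefix(prefix))  # (2, 10, 768)
inputs_embeds = torch.cat((prefix_embeds, embedding_text), dim=1)
assert inputs_embeds.shape == (2, prefix_length + 7, n_embd)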
| 64
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280) # Height, Width
SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''''''
IMG_DIR = ''''''
OUTPUT_DIR = ''''''
NUMBER_IMAGES = 250
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase :List[Any] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for index in range(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :str = update_image_and_anno(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCamelCase :List[Any] = random_chars(32 )
__UpperCamelCase :List[str] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCamelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
__UpperCamelCase :Optional[Any] = []
for anno in new_annos:
__UpperCamelCase :int = anno[3] - anno[1]
__UpperCamelCase :Optional[int] = anno[4] - anno[2]
__UpperCamelCase :int = anno[1] + width / 2
__UpperCamelCase :List[str] = anno[2] + height / 2
__UpperCamelCase :str = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(SCREAMING_SNAKE_CASE )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :str = []
__UpperCamelCase :str = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
__UpperCamelCase :Any = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(SCREAMING_SNAKE_CASE ) as in_file:
__UpperCamelCase :str = in_file.readlines()
__UpperCamelCase :Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" )
__UpperCamelCase :int = []
for obj_list in obj_lists:
__UpperCamelCase :Optional[int] = obj_list.rstrip('''\n''' ).split(''' ''' )
__UpperCamelCase :Any = float(obj[1] ) - float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) - float(obj[4] ) / 2
__UpperCamelCase :Dict = float(obj[1] ) + float(obj[3] ) / 2
__UpperCamelCase :List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
return img_paths, labels
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.0 , ):
'''simple docstring'''
__UpperCamelCase :List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCamelCase :List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCamelCase :Optional[int] = int(scale_x * output_size[1] )
__UpperCamelCase :Any = int(scale_y * output_size[0] )
__UpperCamelCase :List[str] = []
__UpperCamelCase :Dict = []
for i, index in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Any = all_img_list[index]
path_list.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = all_annos[index]
__UpperCamelCase :Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE )
if i == 0: # top-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
__UpperCamelCase :Union[str, Any] = img
for bbox in img_annos:
__UpperCamelCase :Union[str, Any] = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = bbox[2] * scale_y
__UpperCamelCase :int = bbox[3] * scale_x
__UpperCamelCase :Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCamelCase :List[str] = img
for bbox in img_annos:
__UpperCamelCase :str = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Dict = bbox[2] * scale_y
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCamelCase :str = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Tuple = bbox[1] * scale_x
__UpperCamelCase :Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Tuple = bbox[3] * scale_x
__UpperCamelCase :Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCamelCase :Optional[int] = cva.resize(
SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCamelCase :Optional[int] = img
for bbox in img_annos:
__UpperCamelCase :Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
__UpperCamelCase :Optional[int] = scale_y + bbox[2] * (1 - scale_y)
__UpperCamelCase :Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
__UpperCamelCase :int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__UpperCamelCase :List[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCamelCase :Optional[Any] = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
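    # A self-contained check (plain floats, no images) of the two bounding-box conversions
    # used above: YOLO (x_center, y_center, w, h) -> corners in get_dataset, and corners ->
    # YOLO when main writes the mosaic annotations back out.
    import math
    def yolo_to_corners(xc, yc, w, h):
        return xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2
    def corners_to_yolo(xmin, ymin, xmax, ymax):
        return xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2, xmax - xmin, ymax - ymin
    box = (0.5, 0.5, 0.2, 0.4)
    restored = corners_to_yolo(*yolo_to_corners(*box))
    assert all(math.isclose(a, b) for a, b in zip(restored, box))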
| 43
| 0
|
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def _A ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=lowercase , exist_ok=lowercase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowercase , lowercase , f=output_path.as_posix() , input_names=lowercase , output_names=lowercase , dynamic_axes=lowercase , do_constant_folding=lowercase , use_external_data_format=lowercase , enable_onnx_checker=lowercase , opset_version=lowercase , )
else:
export(
lowercase , lowercase , f=output_path.as_posix() , input_names=lowercase , output_names=lowercase , dynamic_axes=lowercase , do_constant_folding=lowercase , opset_version=lowercase , )
@torch.no_grad()
def _A ( lowercase , lowercase , lowercase , lowercase = False ):
"""simple docstring"""
a =torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
a ='''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
a ='''cpu'''
a =StableDiffusionPipeline.from_pretrained(lowercase , torch_dtype=lowercase ).to(lowercase )
a =Path(lowercase )
# TEXT ENCODER
a =pipeline.text_encoder.config.max_position_embeddings
a =pipeline.text_encoder.config.hidden_size
a =pipeline.tokenizer(
'''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=lowercase , return_tensors='''pt''' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowercase , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''sequence'''},
} , opset=lowercase , )
del pipeline.text_encoder
# UNET
a =pipeline.unet.config.in_channels
a =pipeline.unet.config.sample_size
a =output_path / '''unet''' / '''model.onnx'''
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowercase , lowercase , lowercase ).to(device=lowercase , dtype=lowercase ),
torch.randn(2 ).to(device=lowercase , dtype=lowercase ),
torch.randn(2 , lowercase , lowercase ).to(device=lowercase , dtype=lowercase ),
False,
) , output_path=lowercase , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''timestep''': {0: '''batch'''},
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
} , opset=lowercase , use_external_data_format=lowercase , )
a =str(unet_path.absolute().as_posix() )
a =os.path.dirname(lowercase )
a =onnx.load(lowercase )
# clean up existing tensor files
shutil.rmtree(lowercase )
os.mkdir(lowercase )
# collate external tensor files into one
onnx.save_model(
lowercase , lowercase , save_as_external_data=lowercase , all_tensors_to_one_file=lowercase , location='''weights.pb''' , convert_attribute=lowercase , )
del pipeline.unet
# VAE ENCODER
a =pipeline.vae
a =vae_encoder.config.in_channels
a =vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
a =lambda lowercase , lowercase : vae_encoder.encode(lowercase , lowercase )[0].sample()
onnx_export(
lowercase , model_args=(
torch.randn(1 , lowercase , lowercase , lowercase ).to(device=lowercase , dtype=lowercase ),
False,
) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
'''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=lowercase , )
# VAE DECODER
a =pipeline.vae
a =vae_decoder.config.latent_channels
a =vae_decoder.config.out_channels
# forward only through the decoder part
a =vae_encoder.decode
onnx_export(
lowercase , model_args=(
torch.randn(1 , lowercase , lowercase , lowercase ).to(device=lowercase , dtype=lowercase ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=lowercase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
a =pipeline.safety_checker
a =safety_checker.config.vision_config.num_channels
a =safety_checker.config.vision_config.image_size
a =safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowercase , lowercase , lowercase , ).to(device=lowercase , dtype=lowercase ),
torch.randn(1 , lowercase , lowercase , lowercase ).to(device=lowercase , dtype=lowercase ),
) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
'''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
'''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
} , opset=lowercase , )
del pipeline.safety_checker
a =OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
a =pipeline.feature_extractor
else:
a =None
a =None
a =OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=lowercase , feature_extractor=lowercase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowercase )
print('''ONNX pipeline saved to''' , lowercase )
del pipeline
del onnx_pipeline
a =OnnxStableDiffusionPipeline.from_pretrained(lowercase , provider='''CPUExecutionProvider''' )
print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=1_4,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
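    # A minimal, generic torch.onnx.export call (toy linear model, temp file) illustrating
    # the input_names/output_names/dynamic_axes arguments that the onnx_export helper above
    # forwards; it does not reproduce the Stable Diffusion export itself.
    import tempfile

    demo_model = torch.nn.Linear(4, 2).eval()
    demo_sample = torch.randn(1, 4)
    demo_path = Path(tempfile.mkdtemp()) / "demo.onnx"
    export(
        demo_model,
        (demo_sample,),
        f=demo_path.as_posix(),
        input_names=["sample"],
        output_names=["out"],
        dynamic_axes={"sample": {0: "batch"}},  # the batch dimension may vary at runtime
        do_constant_folding=True,
        opset_version=14,
    )
    print("demo model exported to", demo_path)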
| 81
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Union[str, Any] = """wav2vec2"""
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :int = feat_extract_norm
__UpperCamelCase :Tuple = feat_extract_activation
__UpperCamelCase :Union[str, Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :int = list(__lowercase)
__UpperCamelCase :List[Any] = conv_bias
__UpperCamelCase :Optional[int] = num_conv_pos_embeddings
__UpperCamelCase :Dict = num_conv_pos_embedding_groups
__UpperCamelCase :Any = len(self.conv_dim)
__UpperCamelCase :List[str] = num_hidden_layers
__UpperCamelCase :int = intermediate_size
__UpperCamelCase :str = hidden_act
__UpperCamelCase :Any = num_attention_heads
__UpperCamelCase :int = hidden_dropout
__UpperCamelCase :Tuple = attention_dropout
__UpperCamelCase :List[str] = activation_dropout
__UpperCamelCase :Optional[Any] = feat_proj_dropout
__UpperCamelCase :Any = final_dropout
__UpperCamelCase :Any = layerdrop
__UpperCamelCase :str = layer_norm_eps
__UpperCamelCase :Optional[Any] = initializer_range
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :str = do_stable_layer_norm
__UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :List[Any] = apply_spec_augment
__UpperCamelCase :Tuple = mask_time_prob
__UpperCamelCase :int = mask_time_length
__UpperCamelCase :Dict = mask_time_min_masks
__UpperCamelCase :str = mask_feature_prob
__UpperCamelCase :List[str] = mask_feature_length
__UpperCamelCase :Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :List[Any] = num_codevector_groups
__UpperCamelCase :Tuple = contrastive_logits_temperature
__UpperCamelCase :Optional[int] = feat_quantizer_dropout
__UpperCamelCase :Optional[int] = num_negatives
__UpperCamelCase :List[Any] = codevector_dim
__UpperCamelCase :str = proj_codevector_dim
__UpperCamelCase :List[str] = diversity_loss_weight
# ctc loss
__UpperCamelCase :Tuple = ctc_loss_reduction
__UpperCamelCase :Tuple = ctc_zero_infinity
# adapter
__UpperCamelCase :List[str] = add_adapter
__UpperCamelCase :Tuple = adapter_kernel_size
__UpperCamelCase :str = adapter_stride
__UpperCamelCase :Tuple = num_adapter_layers
__UpperCamelCase :Tuple = output_hidden_size or hidden_size
__UpperCamelCase :Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase :Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase :Optional[int] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :str = xvector_output_dim
@property
def UpperCamelCase__ ( self) -> List[str]:
return functools.reduce(operator.mul , self.conv_stride , 1)
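# The property above multiplies the convolutional strides together; with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2) the feature encoder therefore downsamples raw audio
# by 5 * 2**6 = 320 samples per output frame, as this standalone check shows.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320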
| 43
| 0
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F"{solution() = }")
| 193
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[Any] = ["""pixel_values"""]
def __init__( self , __lowercase = True , __lowercase = 32 , __lowercase=PILImageResampling.BILINEAR , __lowercase = True , **__lowercase , ) -> None:
__UpperCamelCase :Optional[int] = do_resize
__UpperCamelCase :Any = do_rescale
__UpperCamelCase :str = size_divisor
__UpperCamelCase :Dict = resample
super().__init__(**__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
__UpperCamelCase , __UpperCamelCase :int = get_image_size(__lowercase)
# Rounds the height and width down to the closest multiple of size_divisor
__UpperCamelCase :List[Any] = height // size_divisor * size_divisor
__UpperCamelCase :List[str] = width // size_divisor * size_divisor
__UpperCamelCase :str = resize(__lowercase , (new_h, new_w) , resample=__lowercase , data_format=__lowercase , **__lowercase)
return image
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase) -> np.ndarray:
return rescale(image=__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase=None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> BatchFeature:
__UpperCamelCase :Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase :Tuple = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase :List[str] = size_divisor if size_divisor is not None else self.size_divisor
__UpperCamelCase :List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''')
__UpperCamelCase :List[Any] = make_list_of_images(__lowercase)
if not valid_images(__lowercase):
raise ValueError('''Invalid image(s)''')
# All transformations expect numpy arrays.
__UpperCamelCase :Optional[Any] = [to_numpy_array(__lowercase) for img in images]
if do_resize:
__UpperCamelCase :List[str] = [self.resize(__lowercase , size_divisor=__lowercase , resample=__lowercase) for image in images]
if do_rescale:
__UpperCamelCase :Dict = [self.rescale(__lowercase , scale=1 / 255) for image in images]
__UpperCamelCase :str = [to_channel_dimension_format(__lowercase , __lowercase) for image in images]
__UpperCamelCase :int = {'''pixel_values''': images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase)
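# A quick standalone check of the size_divisor arithmetic in the resize method above:
# floor division snaps height/width *down* to the nearest multiple of size_divisor.
size_divisor = 32
for height, width in [(480, 640), (479, 641), (31, 33)]:
    new_h = height // size_divisor * size_divisor
    new_w = width // size_divisor * size_divisor
    assert new_h % size_divisor == 0 and new_w % size_divisor == 0
    assert new_h <= height and new_w <= width
print((479 // 32 * 32, 641 // 32 * 32))  # -> (448, 640)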
| 43
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCamelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Any =CTRLTokenizer
UpperCAmelCase_ : Any =False
UpperCAmelCase_ : Tuple =False
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : int = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
__snake_case : Union[str, Any] = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__snake_case : Dict = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
__snake_case : List[str] = {'''unk_token''': '''<unk>'''}
__snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowercase ) )
def UpperCAmelCase ( self , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__lowercase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = '''adapt react readapt apt'''
__snake_case : Optional[Any] = '''adapt react readapt apt'''
return input_text, output_text
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case : Dict = '''adapt react readapt apt'''
__snake_case : Optional[Any] = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
__snake_case : Any = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__snake_case : Optional[Any] = tokens + [tokenizer.unk_token]
__snake_case : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
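# A hand-rolled illustration of the `@@` continuation convention the toy vocab above relies
# on: subword pieces ending in `@@` glue onto the following piece when detokenizing, so
# "re@@ adapt" becomes "readapt". (Sketch only; real tokenizers do more than this.)
def detokenize(tokens):
    return " ".join(tokens).replace("@@ ", "")  # drop the marker and its joining space

assert detokenize("adapt re@@ a@@ c@@ t re@@ adapt apt".split()) == "adapt react readapt apt"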
| 326
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
        next_generation.append(next_generation_row)
return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    '''simple docstring'''
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0]), len(cells)))
        pixels = img.load()
# Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
# Save image
        images.append(img)
        cells = new_generation(cells)
return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
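    # A quick property check of new_generation: the blinker defined above is a period-2
    # oscillator, so two steps return it to its starting configuration.
    flipped = new_generation(BLINKER)
    assert flipped == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    assert new_generation(flipped) == BLINKER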
| 43
| 0
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=7, A=True, A=True, A=False, A=True, A=99, A=32, A=5, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=3, A=4, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : int = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : Any = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : Any = num_choices
SCREAMING_SNAKE_CASE : Dict = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size], self.num_choices )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__lowercase, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = BioGptModel(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE : int = model(__lowercase, attention_mask=__lowercase )
SCREAMING_SNAKE_CASE : int = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A, A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = BioGptForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(__lowercase, attention_mask=__lowercase, token_type_ids=__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        '''simple docstring'''
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device )
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size )
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length ).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size ).squeeze(-1 )
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device )], dim=1, )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask )['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask )['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3 ) )
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        '''simple docstring'''
        model = BioGptModel(config=config ).to(torch_device ).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device )
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3), 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1 )
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask )['''last_hidden_state''']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values )[
            '''last_hidden_state'''
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1E-3 ) )
    def create_and_check_forward_and_backwards(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False):
        '''simple docstring'''
        model = BioGptForCausalLM(config )
        model.to(torch_device )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids )
        self.parent.assertEqual(result.loss.shape, () )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        '''simple docstring'''
        model = BioGptModel(config )
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ), 0.0_01 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ), 0.01 )
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": BioGptModel,
            """text-classification""": BioGptForSequenceClassification,
            """text-generation""": BioGptForCausalLM,
            """token-classification""": BioGptForTokenClassification,
            """zero-shot""": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37 )
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_biogpt_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_biogpt_model_att_mask_past(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )

    def test_biogpt_gradient_checkpointing(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True )

    def test_biogpt_model_past_with_large_inputs(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )

    def test_biogpt_weight_initialization(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )

    def test_biogpt_token_classification_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
    def test_batch_generation(self):
        '''simple docstring'''
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        tokenizer.padding_side = '''left'''
        # Use the EOS token as the PAD token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        inputs = tokenizer(sentences, return_tensors='pt', padding=True )
        input_ids = inputs['''input_ids'''].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs['attention_mask'].to(torch_device ), )
        inputs_non_padded = tokenizer(sentences[0], return_tensors='pt' ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors='pt' ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True )
        expected_output_sentence = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence )
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model(self):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels )
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_biogpt(self):
        '''simple docstring'''
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        input_ids = torch.tensor([[2, 4_805, 9, 656, 21]] )
        output = model(input_ids )[0]
        vocab_size = 42_384
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
@slow
    def test_biogpt_generation(self):
        '''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer('COVID-19 is', return_tensors='pt' ).to(torch_device )
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1_024, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR )
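# The batched-generation test above hinges on left padding: a decoder-only LM
# reads the next token off the last position, so padding must sit on the left
# or it would displace the real context. The same pattern in miniature, kept
# as a comment since it downloads a checkpoint (any causal LM works):
#
# from transformers import BioGptForCausalLM, BioGptTokenizer
#
# tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
# tokenizer.padding_side = "left"
# tokenizer.pad_token = tokenizer.eos_token
# model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
# batch = tokenizer(["Hello, my dog is a little", "Today, I"],
#                   return_tensors="pt", padding=True)
# outputs = model.generate(input_ids=batch["input_ids"],
#                          attention_mask=batch["attention_mask"])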
| 251
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowercase = logging.get_logger(__name__)
def rename_key(key):
    '''simple docstring'''
    regex = r'''\w+[.]\d+'''
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '''_'''.join(pat.split('''.''' ) ) )
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    '''simple docstring'''
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    '''simple docstring'''
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
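# The reshape rules above encode the layout difference between the frameworks:
# PyTorch Conv2d kernels are (out, in, h, w) while Flax expects (h, w, in, out),
# and PyTorch Linear weights are (out, in) while Flax Dense kernels are
# (in, out). A self-contained illustration on dummy arrays (shapes below are
# arbitrary, chosen only for the demo):
import numpy as np

_pt_conv_kernel = np.zeros((8, 3, 5, 5))  # PyTorch: (out, in, h, w)
assert _pt_conv_kernel.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)  # Flax: (h, w, in, out)

_pt_linear_weight = np.zeros((16, 32))  # PyTorch: (out_features, in_features)
assert _pt_linear_weight.T.shape == (32, 16)  # Flax: (in_features, out_features)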
| 43
| 0
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__a = 16
__a = 32
def bamb(x):
    # Convert a byte count to mebibytes.
    return int(x / 2**20 )
class TorchTracemalloc:
"""simple docstring"""
def __enter__( self : int ) -> Any:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
return self
def __exit__( self : str , *lowerCAmelCase__ : Dict ) -> str:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"""train[:{n_train}]""", "validation": f"""validation[:{n_val}]"""} )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels" )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt" )
        return tokenizer.pad(examples, padding="longest", return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train: {}".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json" ), "w" ) as f:
            json.dump(train_total_peak_memory, f )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.", )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.", )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args )
if __name__ == "__main__":
main()
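# The TorchTracemalloc context manager above is reusable on its own: it snapshots
# torch.cuda.memory_allocated() on entry and reports both the net delta and the
# peak of the wrapped region in MB. A minimal sketch, kept as a comment since it
# needs a CUDA device and an already prepared model/batch:
#
# with TorchTracemalloc() as tracker:
#     loss = model(**batch).loss
#     loss.backward()
# print(f"used: {tracker.used} MB, peak: {tracker.peaked} MB")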
| 145
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 43
| 0
|
from collections.abc import Callable
class Heap:
    def __init__(self, key: Callable | None = None):
        # Stores heap items as [item, key(item_value)] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i):
        '''simple docstring'''
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        '''simple docstring'''
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        '''simple docstring'''
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        '''simple docstring'''
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        '''simple docstring'''
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        '''simple docstring'''
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(valid_parent, left):
            valid_parent = left
        if right is not None and not self._cmp(valid_parent, right):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index):
        '''simple docstring'''
        parent = self._parent(index)
        while parent is not None and self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        '''simple docstring'''
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        '''simple docstring'''
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        '''simple docstring'''
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        '''simple docstring'''
        return self.arr[0] if self.size else None

    def extract_top(self):
        '''simple docstring'''
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    '''simple docstring'''
    # Minimal smoke test: the smallest key sits on top for the default key.
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [6, 31]
if __name__ == "__main__":
import doctest
doctest.testmod()
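# A usage sketch for the Heap above with a custom key function: negating the
# value turns the min-heap into a max-heap. Kept as a comment so the module
# body stays side-effect free beyond the doctest hook:
#
# priority_queue = Heap(key=lambda x: -x)
# for value in (4, 37, 17, 10):
#     priority_queue.insert_item(value, value)
# assert priority_queue.get_top() == [37, -37]
# assert priority_queue.extract_top() == [37, -37]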
| 36
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1 , input_2: int = 1 , carry_in: int = 1 ):
    '''simple docstring'''
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
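# A classical one-bit full adder to cross-check the circuit above: the sum bit
# is a XOR b XOR carry_in and the carry-out bit is the majority of the three
# inputs, so for definite (non-superposed) inputs the measured counts of
# quantum_full_adder(1, 1, 1) should concentrate on (sum=1, carry=1).
def classical_full_adder(bit_a: int, bit_b: int, carry_in: int) -> tuple[int, int]:
    total = bit_a + bit_b + carry_in
    return total % 2, total // 2


assert classical_full_adder(1, 1, 1) == (1, 1)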
| 43
| 0
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__A = "base_with_context"
def load_notes_encoder(weights, model):
"""simple docstring"""
lowerCamelCase__: Optional[Any] =nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowerCamelCase__: Any =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__a )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__: Any =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: List[Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: Union[str, Any] =ly_weight['''attention''']
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
"""simple docstring"""
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowerCamelCase__: Optional[Any] =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__a )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__: Tuple =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: Any =ly_weight['''attention''']
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: Optional[int] =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
"""simple docstring"""
lowerCamelCase__: Optional[Any] =nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowerCamelCase__: Optional[int] =nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowerCamelCase__: Any =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__a )
lowerCamelCase__: Optional[int] =nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__: List[Any] =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: Any =nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: Optional[int] =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase__: str =ly_weight['''self_attention''']
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: str =ly_weight['''MultiHeadDotProductAttention_0''']
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: List[str] =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase__: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    """simple docstring"""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
args = parser.parse_args()
main(args)
| 10
|
import random
def partition(a, left_index, right_index):
    '''simple docstring'''
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    '''simple docstring'''
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right )  # recursive quicksort to the right of the pivot point


def main():
    '''simple docstring'''
    user_input = input('''Enter numbers separated by a comma:\n''' ).strip()
    arr = [int(item ) for item in user_input.split(''',''' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
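# A deterministic self-check for the in-place sort above (cheap, runs on
# import): random pivot selection keeps the expected running time at
# O(n log n) even on already-sorted inputs, which defeat a fixed leftmost pivot.
_sample = [5, 3, 8, 1, 2]
quick_sort_random(_sample, 0, len(_sample))
assert _sample == [1, 2, 3, 5, 8]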
| 43
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_A : Optional[Any] = None
_A : Dict = logging.get_logger(__name__)
_A : Dict = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
_A : Any = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
_A : Any = {
"""google/rembert""": 2_56,
}
_A : List[Any] = """▁"""
class a__ ( UpperCAmelCase_ ):
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RemBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 202
|
def solution(numerator: int = 1 , digit: int = 1_000 ):
    '''simple docstring'''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
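# A quick sanity check of the remainder-cycle idea above: the repetend of 1/d
# repeats once a remainder recurs, e.g. 1/7 = 0.(142857) cycles after six
# remainders, so among denominators up to 10 the longest cycle belongs to 7.
assert solution(1, 10) == 7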
| 43
| 0
|
from torch import nn
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
| 130
|
import argparse
import json
from tqdm import tqdm
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
if __name__ == "__main__":
main()
| 43
| 0
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()
    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(contexts ) + "\n" )
if __name__ == "__main__":
main()
| 144
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
__lowercase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowercase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowercase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str , main_target: str ):
    '''simple docstring'''
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))


def crossover(parent_a: str , parent_b: str ):
    '''simple docstring'''
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)


def mutate(child: str , genes: list[str] ):
    '''simple docstring'''
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )


def select(parent_a: tuple[str, float] , population_score: list[tuple[str, float]] , genes: list[str] , ):
    '''simple docstring'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a, child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic(target: str , genes: list[str] , debug: bool = True ):
    '''simple docstring'''
    if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''''''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x: x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"""\nGeneration: {generation}"""
                f"""\nTotal Population:{total_population}"""
                f"""\nBest score: {population_score[0][1]}"""
                f"""\nBest string: {population_score[0][0]}""" )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
__lowercase = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
__lowercase = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
generation, population, target = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 43
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
'''simple docstring'''
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, """embed_dim""" ) )
        self.parent.assertTrue(hasattr(config, """num_heads""" ) )
class TFCvtModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
    def test_attention_outputs(self):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", )
    def test_dataset_conversion(self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", )
@slow
    def test_keras_fit(self):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
    def test_keras_fit_mixed_precision(self):
_snake_case : int = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(__lowercase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
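
# Worked example (illustrative, not part of the original test file): the floor()
# arithmetic in create_and_check_model above maps each stage's spatial size as
# h -> floor((h + 2*pad - kernel) / stride) + 1. With the tester defaults
# (image_size=64, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2],
# patch_padding=[2, 1, 1]) the stages produce 16, 8 and 4.
def _sketch_stage_sizes(image_size=64, kernels=(7, 3, 3), strides=(4, 2, 2), paddings=(2, 1, 1)):
    sizes, h = [], image_size
    for k, s, p in zip(kernels, strides, paddings):
        h = floor((h + 2 * p - k) / s) + 1
        sizes.append(h)
    return sizes

assert _sketch_stage_sizes() == [16, 8, 4]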
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
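
# Minimal sketch (hypothetical helper; pad id assumed to be 0) of what
# tokenizer.pad(..., padding="longest") does in the collate_fn above: each batch
# is padded only to the length of its longest member, whereas the TPU branch
# pads every batch to the same static max_length so tensor shapes never change.
def _sketch_pad_longest(batch_ids):
    width = max(len(ids) for ids in batch_ids)
    return [ids + [0] * (width - len(ids)) for ids in batch_ids]

assert _sketch_pad_longest([[1, 2], [3, 4, 5]]) == [[1, 2, 0], [3, 4, 5]]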
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
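
# Worked check (illustrative, not part of the original script): the loss above is
# divided by gradient_accumulation_steps and the optimizer only steps once every
# gradient_accumulation_steps micro-batches, so with a per-device batch of 16 and
# 4 accumulation steps the effective batch size per device is 64.
def _sketch_effective_batch(per_device_batch, accumulation_steps, num_devices=1):
    return per_device_batch * accumulation_steps * num_devices

assert _sketch_effective_batch(16, 4) == 64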
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
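
# Tiny illustration (hypothetical key; not part of the original script) of how the
# (src, dest) pairs collected in rename_keys are consumed: each original checkpoint
# key is popped and re-inserted under the Hugging Face name, which is exactly what
# rename_key() below does on the real state dict.
_demo_state = {"transformer.encoder.layers.0.linear1.weight": 0}
for _src, _dest in [("transformer.encoder.layers.0.linear1.weight", "encoder.layers.0.fc1.weight")]:
    _demo_state[_dest] = _demo_state.pop(_src)
assert _demo_state == {"encoder.layers.0.fc1.weight": 0}
del _demo_state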
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
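
# Minimal sketch (not part of the original script) of the slicing above: PyTorch's
# MultiheadAttention keeps the query/key/value projections stacked in a single
# (3*d, d) in_proj matrix, so rows [:d] are q, [d:2d] are k and [-d:] are v
# (d = 256 for conditional DETR; the toy tensor below uses d = 4).
_w = torch.arange(48).reshape(12, 4)
_q, _k, _v = _w[:4, :], _w[4:8, :], _w[-4:, :]
assert _q.shape == _k.shape == _v.shape == (4, 4)
del _w, _q, _k, _v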
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our CONDITIONAL_DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE: the remapped key below is reconstructed (strip the original
                # "conditional_detr" prefix and insert ".model") and may need checking
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
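
# Worked example (illustrative): 4150 is one of the numbers counted by solution(),
# since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
if __name__ == "__main__":
    assert digits_fifth_powers_sum(4150) == 4150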
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
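
# Hypothetical usage sketch (not part of the original file): PipelineTool instances
# are callable once instantiated, and the checkpoint is downloaded on first use.
#
#   classifier = TextClassificationTool()
#   classifier("This movie was great fun", labels=["positive", "negative"])
#   # -> "positive"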
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop('''PROCESS_TRAIN''', '''false''')
CATEGORY_MAPPING = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
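
# Worked example (illustrative helper, not part of the original script) of the
# index shifting above: dropping HTML tokens shifts any answer span that starts
# after them to the left by the number of removed tokens.
def _sketch_shift(tokens, is_html, start, end):
    context = []
    for i, tok in enumerate(tokens):
        if not is_html[i]:
            context.append(tok)
        else:
            if start > i:
                start -= 1
            if end > i:
                end -= 1
    return context, start, end

assert _sketch_shift(["<p>", "Paris", "</p>"], [True, False, True], 1, 2) == (["Paris"], 0, 1)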
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
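
# Worked check (illustrative, not part of the original script) of the striding
# above: with q_len = 16, max_length = 4096 and doc_stride = 2048, document
# windows start every max_length - doc_stride = 2048 tokens, and each window
# carries max_length - q_len document tokens on top of the question prefix.
assert list(range(16, 6000, 4096 - 2048))[:3] == [16, 2064, 4112]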
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing ~60% of the samples with no answer
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset('''natural_questions''')
    tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')

    data = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']

    fn_kwargs = {
        '''tokenizer''': tokenizer,
        '''doc_stride''': DOC_STRIDE,
        '''max_length''': MAX_LENGTH,
        '''assertion''': False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
    print(data)

    np.random.seed(SEED)
    cache_file_name = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
    save_to_disk(data, file_name=cache_file_name)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ))

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ))

        torch.manual_seed(0)
        # NOTE: the upcast_attention / use_linear_projection flags below are reconstructed defaults
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.00085, beta_end=0.012, prediction_type='''v_prediction''' , set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
@skip_mps
    def test_image_embeds_none(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({'''image_embeds''': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipe(input_image, '''anime turle''' , generator=generator, output_type='''np''')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipe(input_image, '''anime turle''' , generator=generator, output_type='''np''')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, '''anime turtle''' , num_inference_steps=2, output_type='''np''' , )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
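
    # Minimal sketch (assumes a CUDA device is available; not part of the original
    # test) of the peak-memory pattern used above: reset the allocator statistics,
    # run the workload, then read the high-water mark in bytes.
    #
    #   torch.cuda.reset_peak_memory_stats()
    #   _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")
    #   peak_bytes = torch.cuda.max_memory_allocated()  # compare against 7 * 10**9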
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCamelCase_ = logging.get_logger(__name__)
# General docstring
UpperCamelCase_ = "RegNetConfig"
# Base docstring
UpperCamelCase_ = "facebook/regnet-y-040"
UpperCamelCase_ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
UpperCamelCase_ = "facebook/regnet-y-040"
UpperCamelCase_ = "tabby, tabby cat"
UpperCamelCase_ = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, A = 3, A = 1, A = 1, A = "relu", **A, ):
'''simple docstring'''
super().__init__(**__lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE : Any = tf.keras.layers.ConvaD(
filters=__lowercase, kernel_size=__lowercase, strides=__lowercase, padding='VALID', groups=__lowercase, use_bias=__lowercase, name='convolution', )
SCREAMING_SNAKE_CASE : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization' )
SCREAMING_SNAKE_CASE : List[str] = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.convolution(self.padding(__lowercase ) )
SCREAMING_SNAKE_CASE : Optional[int] = self.normalization(__lowercase )
SCREAMING_SNAKE_CASE : Tuple = self.activation(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE : Tuple = config.num_channels
SCREAMING_SNAKE_CASE : Optional[int] = TFRegNetConvLayer(
out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name='embedder', )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = shape_list(__lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE : Dict = tf.transpose(__lowercase, perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : Optional[int] = self.embedder(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, A = 2, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.keras.layers.ConvaD(
filters=__lowercase, kernel_size=1, strides=__lowercase, use_bias=__lowercase, name='convolution' )
SCREAMING_SNAKE_CASE : int = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization' )
def UpperCamelCase_ ( self, A, A = False ):
'''simple docstring'''
return self.normalization(self.convolution(__lowercase ), training=__lowercase )
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, A, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase, name='pooler' )
SCREAMING_SNAKE_CASE : List[Any] = [
tf.keras.layers.ConvaD(filters=__lowercase, kernel_size=1, activation='relu', name='attention.0' ),
tf.keras.layers.ConvaD(filters=__lowercase, kernel_size=1, activation='sigmoid', name='attention.2' ),
]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.pooler(__lowercase )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE : List[str] = layer_module(__lowercase )
SCREAMING_SNAKE_CASE : int = hidden_state * pooled
return hidden_state
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, A, A, A = 1, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE : str = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE : Any = max(1, out_channels // config.groups_width )
SCREAMING_SNAKE_CASE : Optional[Any] = (
TFRegNetShortCut(__lowercase, stride=__lowercase, name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear', name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE : Optional[int] = [
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=config.hidden_act, name='layer.0' ),
TFRegNetConvLayer(
__lowercase, stride=__lowercase, groups=__lowercase, activation=config.hidden_act, name='layer.1' ),
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=__lowercase, name='layer.2' ),
]
SCREAMING_SNAKE_CASE : Dict = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE : Any = layer_module(__lowercase )
SCREAMING_SNAKE_CASE : Tuple = self.shortcut(__lowercase )
hidden_state += residual
SCREAMING_SNAKE_CASE : Optional[Any] = self.activation(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, A, A, A = 1, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE : int = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE : Optional[int] = max(1, out_channels // config.groups_width )
SCREAMING_SNAKE_CASE : Dict = (
TFRegNetShortCut(__lowercase, stride=__lowercase, name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear', name='shortcut' )
)
SCREAMING_SNAKE_CASE : Dict = [
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=config.hidden_act, name='layer.0' ),
TFRegNetConvLayer(
__lowercase, stride=__lowercase, groups=__lowercase, activation=config.hidden_act, name='layer.1' ),
TFRegNetSELayer(__lowercase, reduced_channels=int(round(in_channels / 4 ) ), name='layer.2' ),
TFRegNetConvLayer(__lowercase, kernel_size=1, activation=__lowercase, name='layer.3' ),
]
SCREAMING_SNAKE_CASE : Dict = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE : Optional[Any] = layer_module(__lowercase )
SCREAMING_SNAKE_CASE : List[Any] = self.shortcut(__lowercase )
hidden_state += residual
SCREAMING_SNAKE_CASE : List[str] = self.activation(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, A, A, A = 2, A = 2, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
        SCREAMING_SNAKE_CASE : List[Any] = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
SCREAMING_SNAKE_CASE : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(__lowercase, __lowercase, __lowercase, stride=__lowercase, name='layers.0' ),
*[layer(__lowercase, __lowercase, __lowercase, name=F"layers.{i+1}" ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
for layer_module in self.layers:
SCREAMING_SNAKE_CASE : Tuple = layer_module(__lowercase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self, A, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__lowercase, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name='stages.0', ) )
SCREAMING_SNAKE_CASE : str = zip(config.hidden_sizes, config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowercase, config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__lowercase, __lowercase, __lowercase, depth=__lowercase, name=F"stages.{i+1}" ) )
def UpperCamelCase_ ( self, A, A = False, A = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE : Tuple = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE : Optional[Any] = stage_module(__lowercase )
if output_hidden_states:
SCREAMING_SNAKE_CASE : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowercase, hidden_states=__lowercase )
@keras_serializable
class _a ( tf.keras.layers.Layer ):
'''simple docstring'''
A : Optional[Any] = RegNetConfig
def __init__( self, A, **A ):
'''simple docstring'''
super().__init__(**__lowercase )
SCREAMING_SNAKE_CASE : List[str] = config
SCREAMING_SNAKE_CASE : List[Any] = TFRegNetEmbeddings(__lowercase, name='embedder' )
SCREAMING_SNAKE_CASE : Any = TFRegNetEncoder(__lowercase, name='encoder' )
SCREAMING_SNAKE_CASE : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowercase, name='pooler' )
@unpack_inputs
def UpperCamelCase_ ( self, A, A = None, A = None, A = False, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Union[str, Any] = self.embedder(__lowercase, training=__lowercase )
SCREAMING_SNAKE_CASE : Any = self.encoder(
__lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, training=__lowercase )
SCREAMING_SNAKE_CASE : List[str] = encoder_outputs[0]
SCREAMING_SNAKE_CASE : List[str] = self.pooler(__lowercase )
        # Change to NCHW output format to have uniformity across the modules
SCREAMING_SNAKE_CASE : str = tf.transpose(__lowercase, perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE : List[Any] = tf.transpose(__lowercase, perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE : List[str] = tuple([tf.transpose(__lowercase, perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowercase, pooler_output=__lowercase, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class _a ( UpperCAmelCase_ ):
'''simple docstring'''
A : int = RegNetConfig
A : List[str] = """regnet"""
A : Optional[int] = """pixel_values"""
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.floataa )}
UpperCamelCase_ = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCamelCase_ = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , UpperCAmelCase_ , )
class _a ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self, A, *A, **A ):
'''simple docstring'''
super().__init__(__lowercase, *__lowercase, **__lowercase )
SCREAMING_SNAKE_CASE : Tuple = TFRegNetMainLayer(__lowercase, name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=__lowercase, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
def UpperCamelCase_ ( self, A, A = None, A = None, A=False, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : List[Any] = self.regnet(
pixel_values=__lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, training=__lowercase, )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCAmelCase_ , )
class _a ( UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self, A, *A, **A ):
'''simple docstring'''
super().__init__(__lowercase, *__lowercase, **__lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = config.num_labels
SCREAMING_SNAKE_CASE : str = TFRegNetMainLayer(__lowercase, name='regnet' )
# classification head
SCREAMING_SNAKE_CASE : List[Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels, name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=__lowercase, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def UpperCamelCase_ ( self, A = None, A = None, A = None, A = None, A=False, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : int = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Union[str, Any] = self.regnet(
__lowercase, output_hidden_states=__lowercase, return_dict=__lowercase, training=__lowercase )
SCREAMING_SNAKE_CASE : List[str] = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE : str = self.classifier[0](__lowercase )
SCREAMING_SNAKE_CASE : List[Any] = self.classifier[1](__lowercase )
SCREAMING_SNAKE_CASE : Tuple = None if labels is None else self.hf_compute_loss(labels=__lowercase, logits=__lowercase )
if not return_dict:
SCREAMING_SNAKE_CASE : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__lowercase, logits=__lowercase, hidden_states=outputs.hidden_states )
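For orientation, here is a minimal inference sketch against the public `transformers` API this file corresponds to (`TFRegNetForImageClassification` and `AutoImageProcessor`); the image path is a placeholder:
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image = Image.open("cat.png")  # placeholder RGB image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])  # e.g. "tabby, tabby cat"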
| 251
|
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    '''simple docstring'''
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / np.abs(lambda_)
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration():
    '''simple docstring'''
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element-wise of each eigenvector,
        # as they are only unique up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    test_power_iteration()
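As a quick sanity check beyond the test above, power iteration on a small symmetric matrix should recover the dominant eigenpair; the expected values below follow from the eigendecomposition of the 2x2 matrix:
import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, 2.0]])  # eigenvalues are 1 and 3
start = np.array([1.0, 0.0])
eigen_value, eigen_vector = power_iteration(matrix, start)
assert abs(eigen_value - 3.0) < 1e-6  # dominant eigenvalue
# The dominant eigenvector is [1, 1] / sqrt(2), up to sign.
assert np.allclose(np.abs(eigen_vector), np.array([1.0, 1.0]) / np.sqrt(2), atol=1e-4)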
| 43
| 0
|
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a_: list) -> None:
    min_val = min(a_)  # min() finds the minimum value
    max_val = max(a_)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a_:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Put the elements back into the array in sorted order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a_[i] = count + min_val
            i += 1
def main():
    a_ = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a_)
    print("Sorted order is:", " ".join(str(x) for x in a_))
if __name__ == "__main__":
    main()
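Pigeonhole sort runs in O(n + range) time with O(range) extra space, so it only pays off when the value range is comparable to n. A variant of the same idea that returns a new list instead of sorting in place:
def pigeonhole_sorted(values: list) -> list:
    # Counting-sort style variant; assumes a non-empty list of integers.
    low = min(values)
    holes = [0] * (max(values) - low + 1)
    for v in values:
        holes[v - low] += 1
    return [low + slot for slot, n in enumerate(holes) for _ in range(n)]

print(pigeonhole_sorted([8, 3, 2, 7, 4, 6, 8]))  # [2, 3, 4, 6, 7, 8, 8]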
| 145
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : List[str] = ["""input_ids""", """attention_mask"""]
a__ : int = None
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
__UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
__UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
__UpperCamelCase :str = add_prefix_space
__UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
__UpperCamelCase :Tuple = add_prefix_space
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._batch_encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> List[int]:
__UpperCamelCase :str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
if len(__lowercase) > self.model_max_length:
__UpperCamelCase :Any = input_ids[-self.model_max_length :]
return input_ids
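A minimal usage sketch against the public `BloomTokenizerFast` class that this file corresponds to in `transformers`:
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
encoded = tokenizer("Hello world!")
print(encoded["input_ids"], encoded["attention_mask"])
print(tokenizer.decode(encoded["input_ids"]))  # should round-trip to "Hello world!"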
| 43
| 0
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "Hello world! cécé herlolip"
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = FairseqRobertaModel.from_pretrained(_lowerCamelCase )
roberta.eval() # disable dropout
_lowerCAmelCase : Optional[Any] = roberta.model.encoder.sentence_encoder
_lowerCAmelCase : Dict = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
_lowerCAmelCase : Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print("Our RoBERTa config:" , _lowerCamelCase )
_lowerCAmelCase : Tuple = XLMRobertaXLForSequenceClassification(_lowerCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_lowerCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase : str = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase : int = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase : List[Any] = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase : BertLayer = model.roberta.encoder.layer[i]
_lowerCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
_lowerCAmelCase : RobertaAttention = layer.attention
_lowerCAmelCase : Optional[int] = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase : Tuple = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase : List[str] = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase : Any = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase : List[Any] = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase : Optional[int] = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase : Dict = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase : int = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase : Optional[Any] = roberta_layer.final_layer_norm.weight
_lowerCAmelCase : List[Any] = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase : str = roberta_layer.fca.weight
_lowerCAmelCase : int = roberta_layer.fca.bias
# output
_lowerCAmelCase : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase : int = roberta_layer.fca.weight
_lowerCAmelCase : Tuple = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase : Union[str, Any] = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase : str = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase : Tuple = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase : Any = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase : int = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase : List[str] = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase : List[str] = roberta.model.encoder.lm_head.weight
_lowerCAmelCase : Dict = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase : torch.Tensor = roberta.encode(_lowerCamelCase ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase : Optional[int] = model(_lowerCamelCase )[0]
if classification_head:
_lowerCAmelCase : int = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_lowerCamelCase ) )
else:
_lowerCAmelCase : str = roberta.model(_lowerCamelCase )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
_lowerCAmelCase : List[str] = torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(_lowerCamelCase ).mkdir(parents=_lowerCamelCase , exist_ok=_lowerCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
_snake_case = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
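The end of the script shows a sanity-check pattern worth reusing in any weight conversion: feed one input through both models and compare the outputs within a tolerance. A standalone sketch of just that check:
import torch

def check_outputs_match(our_output: torch.Tensor, their_output: torch.Tensor, atol: float = 1e-3) -> None:
    # Largest element-wise deviation between converted and reference model outputs.
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7 on a correct conversion
    if not torch.allclose(our_output, their_output, atol=atol):
        raise Exception("Converted model does not match the reference model")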
| 36
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : str = """ctrl"""
a__ : Dict = ["""past_key_values"""]
a__ : Tuple = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __lowercase=246_534 , __lowercase=256 , __lowercase=1_280 , __lowercase=8_192 , __lowercase=48 , __lowercase=16 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=1E-6 , __lowercase=0.02 , __lowercase=True , **__lowercase , ) -> List[Any]:
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :Optional[Any] = n_positions
__UpperCamelCase :Dict = n_embd
__UpperCamelCase :Dict = n_layer
__UpperCamelCase :List[Any] = n_head
__UpperCamelCase :int = dff
__UpperCamelCase :Union[str, Any] = resid_pdrop
__UpperCamelCase :Optional[int] = embd_pdrop
__UpperCamelCase :List[Any] = layer_norm_epsilon
__UpperCamelCase :Dict = initializer_range
__UpperCamelCase :Any = use_cache
super().__init__(**__lowercase)
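This configuration mirrors `transformers`' `CTRLConfig`; a short sketch showing how the attribute map above lets generic code use the canonical names:
from transformers import CTRLConfig

config = CTRLConfig(n_embd=256, n_layer=4, n_head=8)
# The attribute map resolves canonical names to the CTRL-specific ones:
print(config.hidden_size, config.num_hidden_layers)  # 256 4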
| 43
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = StableUnCLIPImgaImgPipeline
_UpperCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_UpperCamelCase : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCamelCase : List[str] = frozenset([] )
def __A ( self ):
_lowerCAmelCase : Any = 32
_lowerCAmelCase : str = embedder_hidden_size
# image encoding components
_lowerCAmelCase : int = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=a__ , projection_dim=a__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = StableUnCLIPImageNormalizer(embedding_dim=a__ )
_lowerCAmelCase : str = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_lowerCAmelCase : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a__ , layers_per_block=1 , upcast_attention=a__ , use_linear_projection=a__ , )
torch.manual_seed(0 )
_lowerCAmelCase : Any = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=a__ , steps_offset=1 , )
torch.manual_seed(0 )
_lowerCAmelCase : int = AutoencoderKL()
_lowerCAmelCase : Any = {
# image encoding components
"""feature_extractor""": feature_extractor,
"""image_encoder""": image_encoder.eval(),
# image noising components
"""image_normalizer""": image_normalizer.eval(),
"""image_noising_scheduler""": image_noising_scheduler,
# regular denoising components
"""tokenizer""": tokenizer,
"""text_encoder""": text_encoder.eval(),
"""unet""": unet.eval(),
"""scheduler""": scheduler,
"""vae""": vae.eval(),
}
return components
def __A ( self , a__ , a__=0 , a__=True ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Optional[int] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
if pil_image:
_lowerCAmelCase : Dict = input_image * 0.5 + 0.5
_lowerCAmelCase : Any = input_image.clamp(0 , 1 )
_lowerCAmelCase : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_lowerCAmelCase : Dict = DiffusionPipeline.numpy_to_pil(a__ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __A ( self ):
_lowerCAmelCase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Tuple = StableUnCLIPImgaImgPipeline(**a__ )
_lowerCAmelCase : List[str] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = self.get_dummy_inputs(a__ )
inputs.update({"""image_embeds""": None} )
_lowerCAmelCase : Any = sd_pipe(**a__ ).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase : List[str] = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __A ( self ):
_lowerCAmelCase : str = torch_device in ["""cpu""", """mps"""]
self._test_attention_slicing_forward_pass(test_max_difference=a__ )
def __A ( self ):
_lowerCAmelCase : str = torch_device in ["""cpu""", """mps"""]
self._test_inference_batch_single_identical(test_max_difference=a__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=a__ )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_lowerCAmelCase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
_lowerCAmelCase : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : List[Any] = pipe(a__ , """anime turle""" , generator=a__ , output_type="""np""" )
_lowerCAmelCase : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
_lowerCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
_lowerCAmelCase : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Tuple = pipe(a__ , """anime turle""" , generator=a__ , output_type="""np""" )
_lowerCAmelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
_lowerCAmelCase : List[str] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase : List[str] = pipe(
a__ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 44
|
"""simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
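Dependency tables like this one usually feed a setup script. A sketch of turning the mapping into pinned requirement strings for a chosen subset (the `deps_list` helper name is an assumption for illustration, not necessarily the library's own):
def deps_list(deps: dict, *pkgs: str) -> list:
    # Look up the pinned requirement string for each requested package.
    return [deps[pkg] for pkg in pkgs]

install_requires = deps_list(_a, "importlib_metadata", "filelock", "huggingface-hub", "numpy", "regex", "requests", "Pillow")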
| 44
| 1
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but it also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , a__ , a__ , a__=False ):
if return_pvalue:
_lowerCAmelCase : List[Any] = pearsonr(a__ , a__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
| 44
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
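A minimal usage sketch via the high-level `pipeline` factory; the checkpoint and image URL are placeholders chosen for illustration:
from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base")
outputs = captioner("https://example.com/cat.png", max_new_tokens=20)
print(outputs[0]["generated_text"])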
| 44
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : int = 10
def __A ( self ):
_lowerCAmelCase : str = [1, 2, 3, 4]
_lowerCAmelCase : int = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(a__ , self.block_size , 0 ) , a__ )
def __A ( self ):
_lowerCAmelCase : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_lowerCAmelCase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(a__ , self.block_size , 0 ) , a__ )
def __A ( self ):
_lowerCAmelCase : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_lowerCAmelCase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(a__ , self.block_size , 0 ) , a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = process_story(a__ )
self.assertEqual(a__ , [] )
def __A ( self ):
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = process_story(a__ )
self.assertEqual(a__ , [] )
self.assertEqual(a__ , [] )
def __A ( self ):
_lowerCAmelCase : str = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
_lowerCAmelCase , _lowerCAmelCase : List[str] = process_story(a__ )
_lowerCAmelCase : Union[str, Any] = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(a__ , a__ )
_lowerCAmelCase : List[str] = ["""It was the best of times."""]
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Any = torch.tensor([1, 2, 3, 4] )
_lowerCAmelCase : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(a__ , 0 ).numpy() , expected.numpy() )
def __A ( self ):
_lowerCAmelCase : Tuple = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_lowerCAmelCase : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(a__ , 23 ).numpy() , expected.numpy() )
def __A ( self ):
_lowerCAmelCase : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_lowerCAmelCase : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(a__ , 1 ).numpy() , expected.numpy() )
def __A ( self ):
_lowerCAmelCase : int = 101
_lowerCAmelCase : Tuple = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_lowerCAmelCase : Dict = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_lowerCAmelCase : int = compute_token_type_ids(a__ , a__ )
np.testing.assert_array_equal(a__ , a__ )
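The helpers under test are small enough to reconstruct from the assertions; a sketch of `truncate_or_pad` inferred from the three cases above (a reconstruction, not the actual `utils_summarization` source):
def truncate_or_pad(sequence: list, block_size: int, pad_token_id: int) -> list:
    # Truncate to block_size, or right-pad with pad_token_id up to block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))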
| 44
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
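# Usage sketch (the parquet path is a placeholder): this builder is what `datasets`
# dispatches to when parquet files are loaded directly with `load_dataset`.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("parquet", data_files={"train": "path/to/train.parquet"})
    print(ds["train"].features)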
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_a : Union[str, Any] = pytest.mark.integration
@pytest.mark.parametrize("""path""" ,["""paws""", """csv"""] )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ) -> List[Any]:
inspect_dataset(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : List[Any] = path + """.py"""
assert script_name in os.listdir(_lowerCamelCase )
assert "__pycache__" not in os.listdir(_lowerCamelCase )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" ,["""accuracy"""] )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Dict ) -> int:
inspect_metric(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Tuple = path + """.py"""
assert script_name in os.listdir(_lowerCamelCase )
assert "__pycache__" not in os.listdir(_lowerCamelCase )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" ,[
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] ,)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ) -> Optional[int]:
_lowerCAmelCase : str = get_dataset_config_info(_lowerCamelCase ,config_name=_lowerCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" ,[
("""paws""", None, ValueError),
] ,)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[str] ) -> Dict:
with pytest.raises(_lowerCamelCase ):
get_dataset_config_info(_lowerCamelCase ,config_name=_lowerCamelCase )
@pytest.mark.parametrize(
"""path, expected""" ,[
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] ,)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ) -> Tuple:
_lowerCAmelCase : Dict = get_dataset_config_names(_lowerCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" ,[
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] ,)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : str ,_lowerCamelCase : Dict ) -> Optional[Any]:
_lowerCAmelCase : Any = get_dataset_infos(_lowerCamelCase )
assert list(infos.keys() ) == expected_configs
_lowerCAmelCase : Any = expected_configs[0]
assert expected_config in infos
_lowerCAmelCase : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" ,[
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] ,)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[int] ) -> int:
_lowerCAmelCase : Optional[Any] = get_dataset_infos(_lowerCamelCase )
assert expected_config in infos
_lowerCAmelCase : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" ,[
("""paws""", None, ValueError),
] ,)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any] ) -> int:
with pytest.raises(_lowerCamelCase ):
get_dataset_split_names(_lowerCamelCase ,config_name=_lowerCamelCase )
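# The functions exercised above form the public inspection API of `datasets`.
# A minimal example, grounded in the parametrized expectations above:
#
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']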
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
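# Example invocation (a sketch: the script file name and the output path are
# placeholders; the model name is one registered in the dicts above):
#
#   python run_bart_onnx_export.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path bart_beam_search.onnx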
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[str] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Optional[int]=None ,_lowerCamelCase : List[str]=None ,_lowerCamelCase : str=None ,_lowerCamelCase : List[str]=None ,_lowerCamelCase : Any=None ,) -> Dict:
if attention_mask is None:
_lowerCAmelCase : int = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_lowerCAmelCase : str = torch.ones(config.encoder_layers ,config.encoder_attention_heads ,device=_lowerCamelCase )
if decoder_head_mask is None:
_lowerCAmelCase : Any = torch.ones(config.decoder_layers ,config.decoder_attention_heads ,device=_lowerCamelCase )
if cross_attn_head_mask is None:
_lowerCAmelCase : Optional[int] = torch.ones(config.decoder_layers ,config.decoder_attention_heads ,device=_lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __A :
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=False , a__=99 , a__=16 , a__=2 , a__=4 , a__=4 , a__="relu" , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.0 , a__=20 , a__=2 , a__=1 , a__=0 , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : int = seq_length
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : List[str] = eos_token_id
_lowerCAmelCase : Tuple = pad_token_id
_lowerCAmelCase : int = bos_token_id
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[Any] = self.eos_token_id # Eos Token
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_lowerCAmelCase : Optional[Any] = input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
_lowerCAmelCase : List[Any] = self.get_config()
_lowerCAmelCase : Dict = prepare_mam_aaa_inputs_dict(a__ , a__ , a__ )
return config, inputs_dict
def __A ( self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __A ( self , a__ , a__ ):
_lowerCAmelCase : List[str] = MaMaaaModel(config=a__ ).get_decoder().to(a__ ).eval()
_lowerCAmelCase : str = inputs_dict["""input_ids"""]
_lowerCAmelCase : Dict = inputs_dict["""attention_mask"""]
_lowerCAmelCase : int = inputs_dict["""head_mask"""]
# first forward pass
_lowerCAmelCase : Tuple = model(a__ , attention_mask=a__ , head_mask=a__ , use_cache=a__ )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
_lowerCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
_lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase : Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )["""last_hidden_state"""]
_lowerCAmelCase : Any = model(a__ , attention_mask=a__ , past_key_values=a__ )[
"""last_hidden_state"""
]
# select random slice
_lowerCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase : int = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1e-2 ) )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Tuple = MaMaaaModel(config=a__ ).to(a__ ).eval()
_lowerCAmelCase : str = model(**a__ )
_lowerCAmelCase : Optional[Any] = outputs.encoder_last_hidden_state
_lowerCAmelCase : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : str = model.get_encoder()
encoder.save_pretrained(a__ )
_lowerCAmelCase : str = MaMaaaEncoder.from_pretrained(a__ ).to(a__ )
_lowerCAmelCase : Tuple = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : List[str] = model.get_decoder()
decoder.save_pretrained(a__ )
_lowerCAmelCase : int = MaMaaaDecoder.from_pretrained(a__ ).to(a__ )
_lowerCAmelCase : Any = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=a__ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Tuple = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
_UpperCamelCase : Union[str, Any] = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : Any = True
_UpperCamelCase : Any = False
_UpperCamelCase : List[Any] = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : str = MaMaaaModelTester(self )
_lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=a__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_class.from_pretrained(a__ , output_loading_info=a__ )
self.assertEqual(info["""missing_keys"""] , [] )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a__ )
def __A ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
_lowerCAmelCase : List[str] = model_class(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = copy.deepcopy(self._prepare_for_class(a__ , a__ ) )
if not self.is_encoder_decoder:
_lowerCAmelCase : Union[str, Any] = inputs["""input_ids"""]
del inputs["input_ids"]
else:
_lowerCAmelCase : Tuple = inputs["""input_ids"""]
_lowerCAmelCase : Union[str, Any] = inputs.get("""decoder_input_ids""" , a__ )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , a__ )
_lowerCAmelCase : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
_lowerCAmelCase : Optional[int] = wte(a__ )
else:
_lowerCAmelCase : Dict = wte(a__ )
_lowerCAmelCase : Dict = wte(a__ )
with torch.no_grad():
model(**a__ )[0]
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : List[Any] = input_dict["""input_ids"""]
_lowerCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(a__ )
_lowerCAmelCase : List[str] = MaMaaaForConditionalGeneration(a__ ).eval().to(a__ )
if torch_device == "cuda":
model.half()
model.generate(a__ , attention_mask=a__ )
model.generate(num_beams=4 , do_sample=a__ , early_stopping=a__ , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Tuple:
return torch.tensor(_lowerCamelCase ,dtype=torch.long ,device=_lowerCamelCase )
_a : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def __A ( self ):
_lowerCAmelCase : List[Any] = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(a__ )
_lowerCAmelCase : Optional[Any] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
_lowerCAmelCase : Union[str, Any] = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
_lowerCAmelCase : Union[str, Any] = prepare_mam_aaa_inputs_dict(model.config , a__ , a__ )
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**a__ )[0]
_lowerCAmelCase : Union[str, Any] = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , a__ )
# change to expected output here
_lowerCAmelCase : Tuple = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=a__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=a__ ) )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(a__ )
# change to intended input
_lowerCAmelCase : Dict = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
_lowerCAmelCase : Union[str, Any] = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
_lowerCAmelCase : str = prepare_mam_aaa_inputs_dict(model.config , a__ , a__ )
with torch.no_grad():
_lowerCAmelCase : str = model(**a__ )[0]
_lowerCAmelCase : int = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , a__ )
# change to expected output here
_lowerCAmelCase : List[Any] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=a__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=a__ ) )
def __A ( self ):
_lowerCAmelCase : Any = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(a__ )
_lowerCAmelCase : List[str] = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
_lowerCAmelCase : int = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
_lowerCAmelCase : Optional[Any] = tokenizer(a__ , padding=a__ , return_tensors="""pt""" )
_lowerCAmelCase : Tuple = model.generate(
input_ids=dct["""input_ids"""].to(a__ ) , attention_mask=dct["""attention_mask"""].to(a__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
_lowerCAmelCase : Optional[Any] = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
_lowerCAmelCase : List[str] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=a__ , skip_special_tokens=a__ )
assert generated == expected_en
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(x)
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
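# For this graph the articulation points are vertices 2, 3 and 5: removing any one
# of them disconnects the graph (e.g. removing 5 cuts off vertices 6, 7 and 8), so
# the call below is intended to print exactly those vertices.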
compute_ap(data)
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> list:
if len(_lowerCamelCase ) <= 1:
return [tuple(_lowerCamelCase )]
_lowerCAmelCase : Dict = []
def generate(_lowerCamelCase : int ,_lowerCamelCase : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 ,_lowerCamelCase )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_lowerCAmelCase , _lowerCAmelCase : List[Any] = arr[k - 1], arr[i]
else: # k is odd
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = arr[k - 1], arr[0]
generate(k - 1 ,_lowerCamelCase )
generate(len(_lowerCamelCase ) ,_lowerCamelCase )
return res
if __name__ == "__main__":
_a : Tuple = input('Enter numbers separated by a comma:\n').strip()
_a : Optional[Any] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
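# For the input "1,2,3" Heap's algorithm produces all 3! = 6 permutations in its
# characteristic order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]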
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(_lowerCamelCase ,2 ) - pow(_lowerCamelCase ,2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(_lowerCamelCase ,2 ) - pow(_lowerCamelCase ,2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(_lowerCamelCase ,2 ) + pow(_lowerCamelCase ,2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
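# Worked example of the relation encoded above (impedance**2 = resistance**2 +
# reactance**2): with resistance=3, reactance=0 and impedance=5 the
# `reactance == 0` branch applies and the missing value is
# sqrt(5**2 - 3**2) == 4.0, i.e. {"reactance": 4.0}.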
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
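# Sanity check of the kernel formula above: for k_size=3, sigma=1 the centre entry
# is 1 / (2 * pi) ~= 0.159 and each corner entry is exp(-1) / (2 * pi) ~= 0.0585.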
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
"""simple docstring"""
import re
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> bool:
_lowerCAmelCase : str = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(_lowerCamelCase ,_lowerCamelCase ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
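# The sample number above matches the pattern (+91 prefix followed by a ten-digit
# subscriber number starting with 7, 8 or 9), so the intended output is True.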
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , a__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , a__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , a__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(a__ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : int = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : Dict = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**a__ )
_lowerCAmelCase : List[str] = do_lower_case
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
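# Usage sketch: this class is what transformers exposes as ElectraTokenizerFast.
# The checkpoint below is one of the real google/electra-* checkpoints mapped above
# (requires network access or a local cache to actually run).
if __name__ == "__main__":
    from transformers import ElectraTokenizerFast

    tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    print(tok("ELECTRA discriminates replaced tokens.")["input_ids"])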
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[int]=0 ) -> Dict:
# Format the message.
if name is None:
_lowerCAmelCase : int = None
else:
_lowerCAmelCase : int = """.""" * max(0 ,spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
_lowerCAmelCase : Optional[int] = fmt.format(_lowerCamelCase )
# Print and recurse (if needed).
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
if msg is not None:
print(_lowerCamelCase )
for k in val.keys():
recursive_print(_lowerCamelCase ,val[k] ,spaces + 2 )
elif isinstance(_lowerCamelCase ,torch.Tensor ):
print(_lowerCamelCase ,""":""" ,val.size() )
else:
print(_lowerCamelCase ,""":""" ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : int ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict ) -> Tuple:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
_lowerCAmelCase : int = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_lowerCAmelCase : Union[str, Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_lowerCAmelCase : List[str] = param.view(*_lowerCamelCase )
_lowerCAmelCase : Tuple = param.transpose(0 ,2 )
_lowerCAmelCase : List[Any] = param.transpose(1 ,2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_lowerCAmelCase : List[Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_lowerCAmelCase : str = param.view(*_lowerCamelCase )
_lowerCAmelCase : int = param.transpose(0 ,1 ).contiguous()
_lowerCAmelCase : List[Any] = param.view(*_lowerCamelCase )
return param
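# Shape example for the permutation above: with num_heads=16, hidden_size=64 per
# head and num_splits=3 (fused Q/K/V), a checkpoint_version >= 2.0 weight of shape
# [16 * 3 * 64, D] is viewed as (16, 3, 64, D), transposed to (3, 16, 64, D) and
# flattened back, so the three splits end up as the leading blocks.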
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Dict ) -> Optional[int]:
# The converted output model.
_lowerCAmelCase : Tuple = {}
# old versions did not store training args
_lowerCAmelCase : List[Any] = input_state_dict.get("""args""" ,_lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_lowerCAmelCase : Any = ds_args.padded_vocab_size
_lowerCAmelCase : Tuple = ds_args.max_position_embeddings
_lowerCAmelCase : Dict = ds_args.hidden_size
_lowerCAmelCase : Dict = ds_args.num_layers
_lowerCAmelCase : str = ds_args.num_attention_heads
_lowerCAmelCase : List[Any] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_lowerCAmelCase : Tuple = config.n_head
# The hidden_size per head.
_lowerCAmelCase : int = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_lowerCAmelCase : List[Any] = input_state_dict["""checkpoint_version"""]
else:
_lowerCAmelCase : Union[str, Any] = 0.0
# The model.
_lowerCAmelCase : Tuple = input_state_dict["""model"""]
# The language model.
_lowerCAmelCase : List[Any] = model["""language_model"""]
# The embeddings.
_lowerCAmelCase : List[str] = lm["""embedding"""]
# The word embeddings.
_lowerCAmelCase : Union[str, Any] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
_lowerCAmelCase : Tuple = word_embeddings[: config.vocab_size, :]
_lowerCAmelCase : Tuple = word_embeddings
# The position embeddings.
_lowerCAmelCase : List[str] = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_lowerCAmelCase : int = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
_lowerCAmelCase : Tuple = pos_embeddings
# The transformer.
_lowerCAmelCase : List[Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
_lowerCAmelCase : Optional[Any] = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
_lowerCAmelCase : Any = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_lowerCAmelCase : Optional[Any] = layer_re.match(_lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_lowerCAmelCase : Any = int(m.group(1 ) )
# The name of the operation.
_lowerCAmelCase : Any = m.group(2 )
# Is it a weight or a bias?
_lowerCAmelCase : Optional[Any] = m.group(3 )
# The name of the layer.
_lowerCAmelCase : Union[str, Any] = f"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
_lowerCAmelCase : Optional[int] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
_lowerCAmelCase : Union[str, Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_lowerCAmelCase : Any = torch.tril(torch.ones((n_positions, n_positions) ,dtype=torch.floataa ) ).view(
1 ,1 ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
_lowerCAmelCase : str = torch.tensor(-1e4 ,dtype=torch.floataa )
_lowerCAmelCase : Union[str, Any] = masked_bias
_lowerCAmelCase : Tuple = fix_query_key_value_ordering(_lowerCamelCase ,_lowerCamelCase ,3 ,_lowerCamelCase ,_lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_lowerCAmelCase : Dict = out_val.transpose(0 ,1 ).contiguous()
# Store.
_lowerCAmelCase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_lowerCAmelCase : Dict = fix_query_key_value_ordering(_lowerCamelCase ,_lowerCamelCase ,3 ,_lowerCamelCase ,_lowerCamelCase )
# Store. No change of shape.
_lowerCAmelCase : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_lowerCAmelCase : Dict = megatron_to_transformers[op_name]
_lowerCAmelCase : Dict = val.transpose(0 ,1 )
# Copy the bias.
elif weight_or_bias == "bias":
_lowerCAmelCase : Union[str, Any] = megatron_to_transformers[op_name]
_lowerCAmelCase : List[str] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_lowerCAmelCase : Union[str, Any] = transformer["""final_layernorm.weight"""]
_lowerCAmelCase : Union[str, Any] = transformer["""final_layernorm.bias"""]
# For the LM head, transformers ties the output matrix to the word embeddings.
_lowerCAmelCase : Dict = word_embeddings
# It should be done!
return output_state_dict
def SCREAMING_SNAKE_CASE ( ) -> int:
# Create the argument parser.
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" ,action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" ,type=_lowerCamelCase ,help="""Path to the checkpoint file (.zip archive or direct .pt file)""" ,)
parser.add_argument(
"""--config_file""" ,default="""""" ,type=_lowerCamelCase ,help="""An optional config json file describing the pre-trained model.""" ,)
_lowerCAmelCase : Dict = parser.parse_args()
# Extract the basename.
_lowerCAmelCase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip extension is optional; keep handling it for backward compatibility
print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint ,"""r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
_lowerCAmelCase : int = torch.load(_lowerCamelCase ,map_location="""cpu""" )
else:
_lowerCAmelCase : List[Any] = torch.load(args.path_to_checkpoint ,map_location="""cpu""" )
_lowerCAmelCase : Union[str, Any] = input_state_dict.get("""args""" ,_lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowerCAmelCase : Optional[Any] = """gelu_fast"""
elif ds_args.openai_gelu:
_lowerCAmelCase : Any = """gelu_new"""
else:
_lowerCAmelCase : int = """gelu"""
else:
# in the very early days this used to be "gelu_new"
_lowerCAmelCase : Tuple = """gelu_new"""
# Spell out all parameters in case the defaults change.
_lowerCAmelCase : Dict = GPTaConfig(
vocab_size=50257 ,n_positions=1024 ,n_embd=1024 ,n_layer=24 ,n_head=16 ,n_inner=4096 ,activation_function=_lowerCamelCase ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,summary_type="""cls_index""" ,summary_use_proj=_lowerCamelCase ,summary_activation=_lowerCamelCase ,summary_proj_to_labels=_lowerCamelCase ,summary_first_dropout=0.1 ,scale_attn_weights=_lowerCamelCase ,use_cache=_lowerCamelCase ,bos_token_id=50256 ,eos_token_id=50256 ,)
else:
_lowerCAmelCase : int = GPTaConfig.from_json_file(args.config_file )
_lowerCAmelCase : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
_lowerCAmelCase : int = convert_megatron_checkpoint(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCamelCase ,_lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
_lowerCAmelCase : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_lowerCAmelCase : List[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
_lowerCAmelCase : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" )
else:
_lowerCAmelCase : Optional[Any] = """gpt2"""
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : int = type(_lowerCamelCase ).__name__
_lowerCAmelCase : str = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_lowerCamelCase )
# Save tokenizer based on args
print(f"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(_lowerCamelCase )
# Store the state_dict to file.
_lowerCAmelCase : int = os.path.join(_lowerCamelCase ,"""pytorch_model.bin""" )
print(f"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(_lowerCamelCase ,_lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
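# Example invocation (a sketch; the checkpoint path is a placeholder and the script
# name is the one this file carries in the transformers repo):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip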
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : List[Any] = logging.get_logger(__name__)
_a : Union[str, Any] = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
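
# Minimal usage sketch (assumes a recent `transformers` exposing BitConfig; the
# printed stage names follow from the default `depths` of length 4 above):
#
#   config = BitConfig(layer_type="bottleneck", global_padding="same")
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']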
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
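
# Quick usage sketch (values chosen here for illustration, not from the tests above):
#
#   assert rabin_karp("abc", "zzabczz")
#   assert not rabin_karp("abd", "zzabczz")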
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
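
# Minimal usage sketch (the override below is hypothetical, chosen only to show
# that any keyword from the signature can be overridden):
#
#   config = GLPNConfig(max_depth=80)
#   print(config.hidden_sizes)  # [32, 64, 160, 256]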
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sort non-negative integers in place with least-significant-digit radix sort.

    >>> radix_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> radix_sort([1, 100, 10, 1000]) == sorted([1, 100, 10, 1000])
    True
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = (i // placement) % RADIX  # current digit; floor division avoids float error
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
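
# Quick randomized cross-check sketch against the built-in sort (seeded so the
# comparison is reproducible; not part of the original file):
#
#   import random
#   rng = random.Random(0)
#   data = [rng.randrange(10_000) for _ in range(1_000)]
#   assert radix_sort(list(data)) == sorted(data)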
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search from `vert`, returning vertices in order of finishing time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
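
# Example (matches the module-level test graphs above; the ordering follows the
# Kosaraju pass order of this implementation):
#
#   assert strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]]
#   assert strongly_connected_components(test_graph_2) == [[0, 2, 1], [3, 5, 4]]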
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
_lowerCAmelCase : List[Any] = 10
_lowerCAmelCase : List[str] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
_lowerCAmelCase : Optional[Any] = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(_lowerCamelCase ) ),
} ,features=_lowerCamelCase ,)
return dataset
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ) -> List[Any]:
_lowerCAmelCase : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=_lowerCamelCase )
return filename
# FILE_CONTENT + files
_a : List[str] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Union[str, Any]:
_lowerCAmelCase : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
_lowerCAmelCase : Union[str, Any] = FILE_CONTENT
with open(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase )
return filename
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Any:
import bz2
_lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
_lowerCAmelCase : Optional[Any] = bytes(_lowerCamelCase ,"""utf-8""" )
with bz2.open(_lowerCamelCase ,"""wb""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> Optional[Any]:
import gzip
_lowerCAmelCase : Dict = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
_lowerCAmelCase : Dict = bytes(_lowerCamelCase ,"""utf-8""" )
with gzip.open(_lowerCamelCase ,"""wb""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> int:
if datasets.config.LZ4_AVAILABLE:
import lz4.frame
_lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
_lowerCAmelCase : str = bytes(_lowerCamelCase ,"""utf-8""" )
with lz4.frame.open(_lowerCamelCase ,"""wb""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> int:
if datasets.config.PY7ZR_AVAILABLE:
import py7zr
_lowerCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with py7zr.SevenZipFile(_lowerCamelCase ,"""w""" ) as archive:
archive.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Dict ) -> int:
import tarfile
_lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(_lowerCamelCase ,"""w""" ) as f:
f.add(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Tuple:
import lzma
_lowerCAmelCase : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
_lowerCAmelCase : Optional[int] = bytes(_lowerCamelCase ,"""utf-8""" )
with lzma.open(_lowerCamelCase ,"""wb""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : str ) -> Tuple:
import zipfile
_lowerCAmelCase : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> List[str]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_lowerCAmelCase : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
_lowerCAmelCase : Tuple = bytes(_lowerCamelCase ,"""utf-8""" )
with zstd.open(_lowerCamelCase ,"""wb""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Any:
_lowerCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
_lowerCAmelCase : Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase )
return filename
_a : Union[str, Any] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
_a : Any = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
_a : List[Any] = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
_a : Any = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
_a : Optional[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ) -> int:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> str:
_lowerCAmelCase : Union[str, Any] = datasets.Dataset.from_dict(_lowerCamelCase )
_lowerCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlite3.connect(_lowerCamelCase ) ) as con:
_lowerCAmelCase : int = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" ,tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(_lowerCamelCase ,"""w""" ,newline="""""" ) as f:
_lowerCAmelCase : str = csv.DictWriter(_lowerCamelCase ,fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[str]:
_lowerCAmelCase : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(_lowerCamelCase ,"""w""" ,newline="""""" ) as f:
_lowerCAmelCase : Tuple = csv.DictWriter(_lowerCamelCase ,fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ) -> Optional[int]:
import bz2
_lowerCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(_lowerCamelCase ,"""rb""" ) as f:
_lowerCAmelCase : List[str] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(_lowerCamelCase ,"""wb""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : str ,_lowerCamelCase : Optional[Any] ) -> Dict:
_lowerCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Tuple ) -> Tuple:
_lowerCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.basename(csv_path.replace(""".csv""" ,""".CSV""" ) ) )
f.write(_lowerCamelCase ,arcname=os.path.basename(csva_path.replace(""".csv""" ,""".CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Union[str, Any] ) -> int:
_lowerCAmelCase : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.join("""main_dir""" ,os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase ,arcname=os.path.join("""main_dir""" ,os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
_lowerCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
_lowerCAmelCase : Any = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(_lowerCamelCase ,"""wb""" ) as f:
_lowerCAmelCase : List[str] = pq.ParquetWriter(_lowerCamelCase ,schema=_lowerCamelCase )
_lowerCAmelCase : Dict = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_lowerCamelCase ) )] for k in DATA[0]} ,schema=_lowerCamelCase )
writer.write_table(_lowerCamelCase )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
_lowerCAmelCase : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_lowerCAmelCase : List[Any] = {"""data""": DATA}
with open(_lowerCamelCase ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Optional[int]:
_lowerCAmelCase : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
_lowerCAmelCase : List[str] = {"""data""": DATA_DICT_OF_LISTS}
with open(_lowerCamelCase ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Optional[Any]:
_lowerCAmelCase : List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(_lowerCamelCase ,"""w""" ) as f:
for item in DATA:
f.write(json.dumps(_lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[int]:
_lowerCAmelCase : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(_lowerCamelCase ,"""w""" ) as f:
for item in DATA:
f.write(json.dumps(_lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(_lowerCamelCase ,"""w""" ) as f:
for item in DATA_312:
f.write(json.dumps(_lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> str:
_lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(_lowerCamelCase ,"""w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(_lowerCamelCase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[Any] ) -> int:
import gzip
_lowerCAmelCase : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(_lowerCamelCase ,"""rb""" ) as orig_file:
with gzip.open(_lowerCamelCase ,"""wb""" ) as zipped_file:
zipped_file.writelines(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Optional[int] ) -> Union[str, Any]:
import gzip
_lowerCAmelCase : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(_lowerCamelCase ,"""rb""" ) as orig_file:
with gzip.open(_lowerCamelCase ,"""wb""" ) as zipped_file:
zipped_file.writelines(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[int] ) -> Any:
_lowerCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : int ,_lowerCamelCase : Union[str, Any] ) -> List[str]:
_lowerCAmelCase : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.join("""nested""" ,os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : str ,_lowerCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.join("""main_dir""" ,os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase ,arcname=os.path.join("""main_dir""" ,os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Any ,_lowerCamelCase : List[str] ) -> Any:
_lowerCAmelCase : str = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(_lowerCamelCase ,"""w""" ) as f:
f.add(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
f.add(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[int] ) -> str:
_lowerCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_lowerCamelCase ,"""w""" ) as f:
f.add(_lowerCamelCase ,arcname=os.path.join("""nested""" ,os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : Any = ["""0""", """1""", """2""", """3"""]
_lowerCAmelCase : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(_lowerCamelCase ,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Union[str, Any]:
_lowerCAmelCase : List[Any] = ["""0""", """1""", """2""", """3"""]
_lowerCAmelCase : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(_lowerCamelCase ,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Tuple:
_lowerCAmelCase : int = ["""0""", """1""", """2""", """3"""]
_lowerCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(_lowerCamelCase ,"""w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : int ,_lowerCamelCase : str ) -> Any:
_lowerCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[Any] ) -> List[str]:
_lowerCAmelCase : Any = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.join("""main_dir""" ,os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase ,arcname=os.path.join("""main_dir""" ,os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.basename("""unsupported.ext""" ) )
f.write(_lowerCamelCase ,arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Tuple:
_lowerCAmelCase : List[str] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
_lowerCAmelCase : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(_lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
return os.path.join("""tests""" ,"""features""" ,"""data""" ,"""test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
return os.path.join("""tests""" ,"""features""" ,"""data""" ,"""test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Optional[int] ) -> Dict:
_lowerCAmelCase : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(_lowerCamelCase ,"""w""" ) as f:
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase ,arcname=os.path.basename(_lowerCamelCase ).replace(""".jpg""" ,"""2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[int]:
_lowerCAmelCase : Tuple = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" ,"""w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" ,"""w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" ,"""w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" ,"""w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" ,"""w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
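
# Usage sketch (hypothetical test module; pytest injects session fixtures by the
# fixture function's name -- the CSV fixture is assumed to be exposed as
# `csv_path`, as its use in the zip fixtures above suggests):
#
#   def test_csv_fixture_has_header_and_rows(csv_path):
#       with open(csv_path, newline="") as f:
#           lines = f.read().splitlines()
#       assert lines[0] == "col_1,col_2,col_3"
#       assert len(lines) == 5  # header + 4 data rows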
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> Optional[Any]:
_lowerCAmelCase : List[Any] = botoa.client("""iam""" )
_lowerCAmelCase : Dict = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=_lowerCamelCase ,AssumeRolePolicyDocument=json.dumps(_lowerCamelCase ,indent=2 ) )
_lowerCAmelCase : Union[str, Any] = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=_lowerCamelCase ,PolicyName=f"{role_name}_policy_permission" ,PolicyDocument=json.dumps(_lowerCamelCase ,indent=2 ) ,)
except iam_client.exceptions.EntityAlreadyExistsException:
print(f"role {role_name} already exists. Using existing one" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Optional[int]:
_lowerCAmelCase : List[str] = botoa.client("""iam""" )
return iam_client.get_role(RoleName=_lowerCamelCase )["Role"]["Arn"]
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : List[Any] = _ask_options(
"""How do you want to authorize?""" ,["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] ,_lowerCamelCase ,)
_lowerCAmelCase : Optional[Any] = None
if credentials_configuration == 0:
_lowerCAmelCase : Union[str, Any] = _ask_field("""Enter your AWS Profile name: [default] """ ,default="""default""" )
_lowerCAmelCase : List[Any] = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
_lowerCAmelCase : str = _ask_field("""AWS Access Key ID: """ )
_lowerCAmelCase : List[str] = aws_access_key_id
_lowerCAmelCase : List[Any] = _ask_field("""AWS Secret Access Key: """ )
_lowerCAmelCase : int = aws_secret_access_key
_lowerCAmelCase : Any = _ask_field("""Enter your AWS Region: [us-east-1]""" ,default="""us-east-1""" )
_lowerCAmelCase : int = aws_region
_lowerCAmelCase : Union[str, Any] = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" ,["""Provide IAM Role name""", """Create new IAM role using credentials"""] ,_lowerCamelCase ,)
if role_management == 0:
_lowerCAmelCase : Any = _ask_field("""Enter your IAM role name: """ )
else:
_lowerCAmelCase : Optional[int] = """accelerate_sagemaker_execution_role"""
print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials" )
_create_iam_role_for_sagemaker(_lowerCamelCase )
_lowerCAmelCase : int = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ ,_convert_yes_no_to_bool ,default=_lowerCamelCase ,error_message="""Please enter yes or no.""" ,)
_lowerCAmelCase : Tuple = None
if is_custom_docker_image:
_lowerCAmelCase : Optional[int] = _ask_field("""Enter your Docker image: """ ,lambda _lowerCamelCase : str(_lowerCamelCase ).lower() )
_lowerCAmelCase : Dict = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ ,_convert_yes_no_to_bool ,default=_lowerCamelCase ,error_message="""Please enter yes or no.""" ,)
_lowerCAmelCase : Tuple = None
if is_sagemaker_inputs_enabled:
_lowerCAmelCase : str = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ ,lambda _lowerCamelCase : str(_lowerCamelCase ).lower() ,)
_lowerCAmelCase : Dict = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ ,_convert_yes_no_to_bool ,default=_lowerCamelCase ,error_message="""Please enter yes or no.""" ,)
_lowerCAmelCase : Union[str, Any] = None
if is_sagemaker_metrics_enabled:
_lowerCAmelCase : Dict = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ ,lambda _lowerCamelCase : str(_lowerCamelCase ).lower() ,)
_lowerCAmelCase : List[Any] = _ask_options(
"""What is the distributed mode?""" ,["""No distributed training""", """Data parallelism"""] ,_convert_sagemaker_distributed_mode ,)
_lowerCAmelCase : List[Any] = {}
_lowerCAmelCase : Union[str, Any] = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" ,_convert_yes_no_to_bool ,default=_lowerCamelCase ,error_message="""Please enter yes or no.""" ,)
if use_dynamo:
_lowerCAmelCase : Tuple = """dynamo_"""
_lowerCAmelCase : List[Any] = _ask_options(
"""Which dynamo backend would you like to use?""" ,[x.lower() for x in DYNAMO_BACKENDS] ,_convert_dynamo_backend ,default=2 ,)
_lowerCAmelCase : int = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ ,_convert_yes_no_to_bool ,default=_lowerCamelCase ,error_message="""Please enter yes or no.""" ,)
if use_custom_options:
_lowerCAmelCase : Dict = _ask_options(
"""Which mode do you want to use?""" ,_lowerCamelCase ,lambda _lowerCamelCase : TORCH_DYNAMO_MODES[int(_lowerCamelCase )] ,default="""default""" ,)
_lowerCAmelCase : List[Any] = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ ,_convert_yes_no_to_bool ,default=_lowerCamelCase ,error_message="""Please enter yes or no.""" ,)
_lowerCAmelCase : Dict = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ ,_convert_yes_no_to_bool ,default=_lowerCamelCase ,error_message="""Please enter yes or no.""" ,)
_lowerCAmelCase : Any = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
_lowerCAmelCase : List[Any] = _ask_options(
_lowerCamelCase ,_lowerCamelCase ,lambda _lowerCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(_lowerCamelCase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
_lowerCAmelCase : int = _ask_field(_lowerCamelCase ,lambda _lowerCamelCase : str(_lowerCamelCase ).lower() ,default="""ml.p3.2xlarge""" )
_lowerCAmelCase : int = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_lowerCAmelCase : List[Any] = _ask_field(
"""How many machines do you want use? [1]: """ ,_lowerCamelCase ,default=1 ,)
_lowerCAmelCase : Optional[Any] = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" ,["""no""", """fp16""", """bf16""", """fp8"""] ,_convert_mixed_precision ,)
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
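
# In practice this prompt flow is reached through the Accelerate CLI (a sketch;
# the SageMaker branch is selected interactively at the first question):
#
#   $ accelerate config
#   # choose "Amazon SageMaker" as the compute environment to walk these prompts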
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
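
# Sanity sketch: with roughly half of the 6*key_len bases agreeing on average,
# the sifted string comfortably covers key_len bits, and a fixed seed makes both
# the random bases and the simulator deterministic:
#
#   keys = [bb84(8, seed=0) for _ in range(2)]
#   assert keys[0] == keys[1] and len(keys[0]) == 8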
"""simple docstring"""
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
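
# A minimal sketch (added for illustration; `deps_list` is a hypothetical
# helper, not part of the table above) of how such a name -> pin table is
# typically consumed by a setup script:
def deps_list(*pkgs: str) -> list:
    # Resolve package names to their pinned pip specifiers; unknown names raise KeyError.
    return [deps[pkg] for pkg in pkgs]


# e.g. install_requires = deps_list("numpy", "regex", "requests", "Pillow")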
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class __A :
def __init__( self , *,
a__ = 0.8_5 , ):
_lowerCAmelCase : List[Any] = duplication_jaccard_threshold
_lowerCAmelCase : Union[str, Any] = NUM_PERM
_lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase : Optional[int] = defaultdict(a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self._index.query(a__ )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(a__ , a__ )
if len(a__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(a__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(a__ )
def __A ( self ):
_lowerCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase : List[str] = [base] + list(a__ )
# reformat the cluster to be a list of dict
_lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(a__ )
return duplicate_clusters
def __A ( self , a__ ):
_lowerCAmelCase : Dict = self.get_duplicate_clusters()
with open(a__ , """w""" ) as f:
json.dump(a__ , a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]:
_lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ):
di.add(_lowerCamelCase ,_lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Any = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_a : str = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : int = []
for elementa in cluster:
_lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase : Any = 1
extremes.append(_lowerCamelCase )
return extremes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str:
global _shared_dataset
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,):
extremes_list.append(_lowerCamelCase )
return extremes_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"Original dataset size: {len(_lowerCamelCase )}" )
print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" )
print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Filtered dataset size: {len(_lowerCamelCase )}" )
return ds_filter, duplicate_clusters
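
# A hedged usage sketch (commented out; the dataset name is illustrative and
# requires network access): near-deduplicate a code dataset whose text lives
# in a "content" column, as the functions above assume.
#
# from datasets import load_dataset
#
# ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
# ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)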
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_a : str = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_a : Optional[int] = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "maskformer"
_UpperCamelCase : Optional[Any] = {"hidden_size": "mask_feature_size"}
_UpperCamelCase : Any = ["resnet", "swin"]
_UpperCamelCase : Dict = ["detr"]
def __init__( self , a__ = 256 , a__ = 256 , a__ = 0.1 , a__ = False , a__ = None , a__ = None , a__ = 0.0_2 , a__ = 1.0 , a__ = 1.0 , a__ = 1.0 , a__ = 2_0.0 , a__ = None , **a__ , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_lowerCAmelCase : Union[str, Any] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(a__ , a__ ):
_lowerCAmelCase : str = backbone_config.pop("""model_type""" )
_lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Dict = config_class.from_dict(a__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_lowerCAmelCase : int = DetrConfig()
else:
# verify that the decoder is supported
_lowerCAmelCase : Union[str, Any] = (
decoder_config.pop("""model_type""" ) if isinstance(a__ , a__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(a__ , a__ ):
_lowerCAmelCase : int = CONFIG_MAPPING[decoder_type]
_lowerCAmelCase : Tuple = config_class.from_dict(a__ )
_lowerCAmelCase : List[Any] = backbone_config
_lowerCAmelCase : List[str] = decoder_config
# main feature dimension for the model
_lowerCAmelCase : int = fpn_feature_size
_lowerCAmelCase : Tuple = mask_feature_size
# initializer
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = init_xavier_std
# Hungarian matcher && loss
_lowerCAmelCase : Dict = cross_entropy_weight
_lowerCAmelCase : Union[str, Any] = dice_weight
_lowerCAmelCase : Tuple = mask_weight
_lowerCAmelCase : int = use_auxiliary_loss
_lowerCAmelCase : List[Any] = no_object_weight
_lowerCAmelCase : int = output_auxiliary_logits
_lowerCAmelCase : str = self.decoder_config.encoder_attention_heads
_lowerCAmelCase : int = self.decoder_config.num_hidden_layers
super().__init__(**a__ )
@classmethod
def __A ( cls , a__ , a__ , **a__ ):
return cls(
backbone_config=a__ , decoder_config=a__ , **a__ , )
def __A ( self ):
_lowerCAmelCase : Tuple = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : str = self.backbone_config.to_dict()
_lowerCAmelCase : Dict = self.decoder_config.to_dict()
_lowerCAmelCase : List[Any] = self.__class__.model_type
return output
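
# A short usage sketch (commented out; assumes this module's imports are
# available as in the transformers package):
#
# backbone = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
# decoder = DetrConfig()
# config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone, decoder)
# assert config.hidden_size == config.mask_feature_size  # via attribute_map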
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=32 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : List[Any] = len(a__ )
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Any = qkv_bias
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = drop_path_rate
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Tuple = (0, 0, 0, 0)
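
# A quick illustrative check (commented out): the derived `hidden_size` is
# the channel dimension after the last stage, embed_dim * 2 ** (num_stages - 1).
#
# config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
# assert config.hidden_size == 96 * 2 ** 3  # 768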
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_a : Any = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" ,type=_lowerCamelCase ,default="""data/dump.txt""" ,help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" ,type=_lowerCamelCase ,default="""bert""" ,choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" ,type=_lowerCamelCase ,default="""bert-base-uncased""" ,help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" ,type=_lowerCamelCase ,default="""data/dump""" ,help="""The dump file prefix.""" )
_lowerCAmelCase : int = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
_lowerCAmelCase : int = BertTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
_lowerCAmelCase : Union[str, Any] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
_lowerCAmelCase : Optional[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase : Union[str, Any] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
_lowerCAmelCase : Dict = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
_lowerCAmelCase : Union[str, Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_lowerCAmelCase : Any = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
_lowerCAmelCase : List[Any] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path ,"""r""" ,encoding="""utf8""" ) as fp:
_lowerCAmelCase : Tuple = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f"{len(_lowerCamelCase )} examples to process." )
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = 10000
_lowerCAmelCase : str = time.time()
for text in data:
_lowerCAmelCase : Optional[int] = f"{bos} {text.strip()} {sep}"
_lowerCAmelCase : Dict = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
rslt.append(_lowerCamelCase )
iter += 1
if iter % interval == 0:
_lowerCAmelCase : int = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
_lowerCAmelCase : List[Any] = time.time()
logger.info("""Finished binarization""" )
logger.info(f"{len(_lowerCamelCase )} examples processed." )
_lowerCAmelCase : List[Any] = f"{args.dump_file}.{args.tokenizer_name}.pickle"
_lowerCAmelCase : List[Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
_lowerCAmelCase : Tuple = [np.uintaa(_lowerCamelCase ) for d in rslt]
else:
_lowerCAmelCase : str = [np.intaa(_lowerCamelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(_lowerCamelCase ,"""wb""" ) as handle:
pickle.dump(rslt_ ,_lowerCamelCase ,protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
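
# A minimal sketch (commented out; the file name mirrors the dump format
# produced above with the default arguments) of reading the pickle back:
#
# import pickle
#
# with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
#     sequences = pickle.load(fp)  # list of np.uint16 / np.int32 arrays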
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
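
# A hedged usage sketch (commented out; the checkpoint name is illustrative):
# this class backs the "image-to-text" task of `transformers.pipeline`.
#
# from transformers import pipeline
#
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# print(captioner("parrots.png"))  # [{"generated_text": "..."}]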
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
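
# A minimal inference sketch (commented out; the checkpoint path is
# hypothetical): early exiting is enabled at evaluation time by setting a
# per-layer entropy threshold, below which a highway classifier's prediction
# is returned via HighwayException instead of running the remaining layers.
#
# model = DeeBertForSequenceClassification.from_pretrained("path/to/finetuned-deebert")
# model.eval()
# model.bert.encoder.set_early_exit_entropy(0.3)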
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Dict:
_lowerCAmelCase : Any = min(_lowerCamelCase ) # min() finds the minimum value
_lowerCAmelCase : Dict = max(_lowerCamelCase ) # max() finds the maximum value
_lowerCAmelCase : Tuple = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_lowerCAmelCase : Optional[Any] = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(_lowerCamelCase ,_lowerCamelCase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
_lowerCAmelCase : Tuple = 0
for count in range(_lowerCamelCase ):
while holes[count] > 0:
holes[count] -= 1
_lowerCAmelCase : Optional[int] = count + min_val
i += 1
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
_lowerCAmelCase : List[str] = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(_lowerCamelCase )
print("""Sorted order is:""" ,""" """.join(_lowerCamelCase ) )
if __name__ == "__main__":
main()
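
# A tiny worked example (added for illustration): pigeonhole sort runs in
# O(n + range) time and O(range) extra space, so it only pays off when
# max(a) - min(a) is small relative to len(a).
#
# data = [5, 1, 4, 1, 3]
# pigeonhole_sort(data)  # sorts in place
# assert data == [1, 1, 3, 4, 5]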
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ""
_UpperCamelCase : str = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , a__ = None , a__ = None , **a__ , ):
super().__init__(self , **a__ )
_lowerCAmelCase : Any = repo_info
_lowerCAmelCase : Optional[Any] = token
_lowerCAmelCase : Optional[int] = None
def __A ( self ):
if self.dir_cache is None:
_lowerCAmelCase : Optional[Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_lowerCAmelCase : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {"""name""": str(a__ ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , a__ , a__ = "rb" , **a__ , ):
if not isinstance(self.repo_info , a__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_lowerCAmelCase : Tuple = hf_hub_url(self.repo_info.id , a__ , revision=self.repo_info.sha )
return fsspec.open(
a__ , mode=a__ , headers=get_authentication_headers_for_url(a__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __A ( self , a__ , **a__ ):
self._get_dirs()
_lowerCAmelCase : Union[str, Any] = self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def __A ( self , a__ , a__=False , **a__ ):
self._get_dirs()
_lowerCAmelCase : Any = PurePosixPath(path.strip("""/""" ) )
_lowerCAmelCase : List[str] = {}
for p, f in self.dir_cache.items():
_lowerCAmelCase : Any = PurePosixPath(p.strip("""/""" ) )
_lowerCAmelCase : Optional[int] = p.parent
if root == path:
_lowerCAmelCase : Dict = f
_lowerCAmelCase : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
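
# A hedged usage sketch (commented out; requires network access, and the
# repository id is illustrative): this legacy filesystem is constructed from
# a `DatasetInfo` and then behaves like any fsspec filesystem.
#
# from huggingface_hub import HfApi
#
# repo_info = HfApi().dataset_info("squad")
# fs = HfFileSystem(repo_info=repo_info)
# print(fs.ls(""))  # top-level files of the repository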
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["image_processor", "tokenizer"]
_UpperCamelCase : List[Any] = "CLIPImageProcessor"
_UpperCamelCase : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , a__=None , a__=None , **a__ ):
_lowerCAmelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , a__ , )
_lowerCAmelCase : Tuple = kwargs.pop("""feature_extractor""" )
_lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a__ , a__ )
def __call__( self , a__=None , a__=None , a__=None , **a__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase : Optional[Any] = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
_lowerCAmelCase : int = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
_lowerCAmelCase : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __A ( self ):
_lowerCAmelCase : str = self.tokenizer.model_input_names
_lowerCAmelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , a__ , )
return self.image_processor_class
@property
def __A ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , a__ , )
return self.image_processor
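
# A usage sketch (commented out; "openai/clip-vit-base-patch32" is the
# standard public checkpoint, and `image` stands for any PIL image):
#
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# # batch now holds input_ids, attention_mask and pixel_values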
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : List[Any] ) -> Tuple:
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(_lowerCamelCase )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_a : Dict = 'Enter the base and the power separated by a comma: '
_a , _a : Dict = map(int, input(prompt).split(','))
_a , _a : int = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
_a : List[Any] = res(xa, ya)
_a : Dict = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal')
"""simple docstring"""
from math import ceil
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> int:
_lowerCAmelCase : Dict = list(range(0 ,_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCAmelCase : Union[str, Any] = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
# Missing blocks
_lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks]
_lowerCAmelCase : List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
""" These attention blocks were specified more than once: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""There are attention blocks for this model that are not specified in the device_map. Add these attention """
"""blocks to a device on the device_map: """ + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
"""The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
+ str(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) )
_lowerCAmelCase : Optional[Any] = int(ceil(n_layers / len(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,_lowerCamelCase ,_lowerCamelCase )]
return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
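
# A concrete worked example (added for illustration): splitting 12 layers
# evenly over 4 devices with the helper above.
#
# >>> get_device_map(12, [0, 1, 2, 3])
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}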
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> bool:
_lowerCAmelCase : Dict = [int(_lowerCamelCase ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(_lowerCamelCase ) == 4 and all(0 <= int(_lowerCamelCase ) <= 254 for octet in octets )
if __name__ == "__main__":
_a : Tuple = input().strip()
_a : Optional[int] = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 100 ) -> int:
_lowerCAmelCase : Tuple = set()
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : int = n + 1 # maximum limit
for a in range(2 ,_lowerCamelCase ):
for b in range(2 ,_lowerCamelCase ):
_lowerCAmelCase : Dict = a**b # calculates the current power
collect_powers.add(_lowerCamelCase ) # adds the result to the set
return len(_lowerCamelCase )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
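
# Why the set matters (worked micro-example, added for illustration): for
# n = 5 there are 16 (a, b) pairs but only 15 distinct powers, because
# 2**4 == 4**2 == 16.
#
# assert solution(5) == 15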
| 44
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
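# Illustrative usage through the high-level `transformers.pipeline` factory
# (the checkpoint name below is only an example, not something this file pins down):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("parrots.png")  # -> [{"generated_text": "..."}]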
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> Any:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
while cur > 1:
# Find the maximum number in arr
_lowerCAmelCase : Any = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
_lowerCAmelCase : Union[str, Any] = arr[mi::-1] + arr[mi + 1 : len(_lowerCamelCase )]
# Reverse whole list
_lowerCAmelCase : Dict = arr[cur - 1 :: -1] + arr[cur : len(_lowerCamelCase )]
cur -= 1
return arr
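# Worked trace for pancake_sort([3, 1, 2]):
#   cur=3: max of [3, 1, 2] is at index 0 -> flip prefix -> [3, 1, 2] -> flip first 3 -> [2, 1, 3]
#   cur=2: max of [2, 1] is at index 0    -> flip prefix -> [2, 1, 3] -> flip first 2 -> [1, 2, 3]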
if __name__ == "__main__":
_a : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
_a : Dict = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
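# Standalone sketch of the batched-read pattern used in `_generate_tables` above
# (file name and batch size are illustrative):
#   import pyarrow as pa
#   import pyarrow.parquet as pq
#   parquet_file = pq.ParquetFile("data.parquet")
#   for record_batch in parquet_file.iter_batches(batch_size=10_000):
#       pa_table = pa.Table.from_batches([record_batch])
#       print(pa_table.num_rows)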
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_a : str = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ,_lowerCamelCase : Dict ,_lowerCamelCase : str=None ) -> List[str]:
# Initialise PyTorch model
_lowerCAmelCase : str = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : int = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : str = finetuning_task
_lowerCAmelCase : Optional[int] = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Any = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Optional[int] = finetuning_task
_lowerCAmelCase : List[str] = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : List[str] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : str = os.path.join(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase ,_lowerCamelCase )
print(f"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() ,_lowerCamelCase )
print(f"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
_a : List[Any] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
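# Example invocation (script name and all paths are illustrative):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sst-2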
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
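# Example invocation (script name and paths are illustrative):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --device cpu --output_file_path BART.onnx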
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __A :
_UpperCamelCase : Any = XGLMConfig
_UpperCamelCase : List[Any] = {}
_UpperCamelCase : Optional[int] = "gelu"
def __init__( self , a__ , a__=14 , a__=7 , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.0_2 , ):
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Optional[Any] = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Optional[int] = use_input_mask
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Union[str, Any] = ffn_dim
_lowerCAmelCase : Any = activation_function
_lowerCAmelCase : Tuple = activation_dropout
_lowerCAmelCase : int = attention_dropout
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : str = 2
_lowerCAmelCase : Optional[int] = 1
def __A ( self ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
_lowerCAmelCase : Dict = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase : Tuple = None
if self.use_input_mask:
_lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : int = self.get_config()
_lowerCAmelCase : int = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __A ( self ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=a__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=a__ , )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
        (
            _lowerCAmelCase,
            _lowerCAmelCase,
            _lowerCAmelCase,
            _lowerCAmelCase,
        ) = config_and_inputs
_lowerCAmelCase : Tuple = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_UpperCamelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
_UpperCamelCase : Optional[Any] = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
_UpperCamelCase : Any = False
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
_lowerCAmelCase : List[str] = TFXGLMModelTester(self )
_lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=a__ , n_embd=37 )
def __A ( self ):
self.config_tester.run_common_tests()
@slow
def __A ( self ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = TFXGLMModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def __A ( self ):
super().test_resize_token_embeddings()
@require_tf
class __A ( unittest.TestCase ):
@slow
def __A ( self , a__=True ):
_lowerCAmelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase : Any = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
_lowerCAmelCase : str = model.generate(a__ , do_sample=a__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : str = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase : List[Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase : Optional[Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase : List[str] = model.generate(a__ , do_sample=a__ , seed=[7, 0] )
_lowerCAmelCase : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a__ )
_lowerCAmelCase : Union[str, Any] = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase : Optional[Any] = """left"""
# use different length sentences to test batching
_lowerCAmelCase : List[Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase : int = tokenizer(a__ , return_tensors="""tf""" , padding=a__ )
_lowerCAmelCase : Optional[int] = inputs["""input_ids"""]
_lowerCAmelCase : List[Any] = model.generate(input_ids=a__ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase : Any = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase : List[Any] = model.generate(input_ids=a__ , max_new_tokens=12 )
_lowerCAmelCase : str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase : Optional[Any] = model.generate(input_ids=a__ , max_new_tokens=12 )
_lowerCAmelCase : Any = tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=a__ )
_lowerCAmelCase : Dict = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(a__ , a__ )
self.assertListEqual(a__ , [non_padded_sentence, padded_sentence] )
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
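# For the sample graph below, the articulation points are 2, 3 and 5
# (removing any one of them disconnects the graph).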
# Adjacency list of graph
_a : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
"""simple docstring"""
from datetime import datetime
import requests
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> bytes:
_lowerCAmelCase : Tuple = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
_lowerCAmelCase : Optional[Any] = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(_lowerCamelCase ).content
if __name__ == "__main__":
_a : Union[str, Any] = input('Enter Video/IGTV url: ').strip()
_a : Tuple = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(a__ , f.name )
_lowerCAmelCase : Union[str, Any] = XGLMTokenizer(f.name , keep_accents=a__ )
_lowerCAmelCase : List[str] = pickle.dumps(a__ )
pickle.loads(a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
_a : int = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
_a : Optional[Any] = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
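# Maps every byte value (0-255) to a printable unicode character: bytes that are
# already printable map to themselves, the rest are shifted past code point 255,
# so BPE can operate on arbitrary bytes without whitespace/control characters.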
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Optional[Any] = (
list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) )
)
_lowerCAmelCase : Dict = bs[:]
_lowerCAmelCase : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
_lowerCAmelCase : List[Any] = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : int = set()
_lowerCAmelCase : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Tuple = char
return pairs
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__ , a__="replace" , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=False , **a__ , ):
_lowerCAmelCase : Union[str, Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else bos_token
_lowerCAmelCase : Optional[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else eos_token
_lowerCAmelCase : List[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else sep_token
_lowerCAmelCase : Optional[int] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else cls_token
_lowerCAmelCase : List[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else unk_token
_lowerCAmelCase : Dict = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase : Optional[Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
errors=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , add_prefix_space=a__ , **a__ , )
with open(a__ , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase : List[Any] = json.load(a__ )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Dict = errors # how to handle errors in decoding
_lowerCAmelCase : Any = bytes_to_unicode()
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(a__ , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase : Dict = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : int = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , a__ ):
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Any = tuple(a__ )
_lowerCAmelCase : Tuple = get_pairs(a__ )
if not pairs:
return token
while True:
_lowerCAmelCase : Optional[Any] = min(a__ , key=lambda a__ : self.bpe_ranks.get(a__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[str] = 0
while i < len(a__ ):
try:
_lowerCAmelCase : str = word.index(a__ , a__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Tuple = j
if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : List[Any] = tuple(a__ )
_lowerCAmelCase : Optional[int] = new_word
if len(a__ ) == 1:
break
else:
_lowerCAmelCase : Any = get_pairs(a__ )
_lowerCAmelCase : str = """ """.join(a__ )
_lowerCAmelCase : str = word
return word
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = []
for token in re.findall(self.pat , a__ ):
_lowerCAmelCase : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a__ ).split(""" """ ) )
return bpe_tokens
def __A ( self , a__ ):
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def __A ( self , a__ ):
return self.decoder.get(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """""".join(a__ )
_lowerCAmelCase : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Tuple = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[str] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + """\n""" )
_lowerCAmelCase : Optional[int] = 0
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase : List[str] = token_index
writer.write(""" """.join(a__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Dict = [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , a__ , a__=False , **a__ ):
_lowerCAmelCase : int = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a__ ) > 0 and not text[0].isspace()):
_lowerCAmelCase : int = """ """ + text
return (text, kwargs)
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
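# Quick self-contained sanity check (illustrative; no image file required):
#   from numpy import arange, uint8
#   img = arange(100, dtype=uint8).reshape(10, 10)
#   blurred = gaussian_filter(img, 3, sigma=1)  # output shape: (8, 8)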
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
"""simple docstring"""
import argparse
from collections import defaultdict
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ) -> int:
_lowerCAmelCase : Optional[int] = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(_lowerCamelCase ,"""r""" ) as f:
_lowerCAmelCase : Any = f.readlines()
_lowerCAmelCase : Tuple = f"class {class_name}("
_lowerCAmelCase : Union[str, Any] = f"{4 * ' '}def {test_name}("
_lowerCAmelCase : Dict = f"{8 * ' '}{correct_line.split()[0]}"
_lowerCAmelCase : Dict = f"{16 * ' '}{correct_line.split()[0]}"
_lowerCAmelCase : int = False
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : str = []
for line in lines:
if line.startswith(_lowerCamelCase ):
_lowerCAmelCase : int = True
elif in_class and line.startswith(_lowerCamelCase ):
_lowerCAmelCase : List[str] = True
elif in_class and in_func and (line.startswith(_lowerCamelCase ) or line.startswith(_lowerCamelCase )):
_lowerCAmelCase : Dict = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_lowerCAmelCase : Optional[int] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_lowerCAmelCase : int = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}" )
_lowerCAmelCase : Union[str, Any] = False
else:
new_lines.append(_lowerCamelCase )
with open(_lowerCamelCase ,"""w""" ) as f:
for line in new_lines:
f.write(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Tuple=None ) -> Union[str, Any]:
if fail is not None:
with open(_lowerCamelCase ,"""r""" ) as f:
_lowerCAmelCase : Optional[int] = {l.strip() for l in f.readlines()}
else:
_lowerCAmelCase : List[Any] = None
with open(_lowerCamelCase ,"""r""" ) as f:
_lowerCAmelCase : str = f.readlines()
_lowerCAmelCase : str = defaultdict(_lowerCamelCase )
for line in correct_lines:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
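# The file passed as --correct_filename is expected to hold one entry per line in
# the form (illustrative):
#   <test_file>;<class_name>;<test_name>;<corrected_source_line>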
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
_a : List[str] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ElectraTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__="[UNK]" , a__="[SEP]" , a__="[PAD]" , a__="[CLS]" , a__="[MASK]" , a__=True , a__=None , **a__ , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , a__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , a__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , a__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(a__ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : int = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : Dict = tokenize_chinese_chars
_lowerCAmelCase : str = normalizer_class(**a__ )
_lowerCAmelCase : List[str] = do_lower_case
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
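# Illustrative usage through the public class (checkpoint name is an example from
# the maps above; a sketch only):
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tok("electra uses a replaced-token-detection objective")["input_ids"]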
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> list:
for i in range(len(_lowerCamelCase ) - 1 ,0 ,-1 ):
_lowerCAmelCase : Dict = False
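        # backward pass: bubble the smallest remaining element toward the front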
for j in range(_lowerCamelCase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCAmelCase , _lowerCAmelCase : List[str] = unsorted[j - 1], unsorted[j]
_lowerCAmelCase : int = True
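        # forward pass: bubble the largest remaining element toward the back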
for j in range(_lowerCamelCase ):
if unsorted[j] > unsorted[j + 1]:
_lowerCAmelCase , _lowerCAmelCase : Any = unsorted[j + 1], unsorted[j]
_lowerCAmelCase : int = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : Tuple = input('Enter numbers separated by a comma:\n').strip()
_a : Optional[Any] = [int(item) for item in user_input.split(',')]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_a : str = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_a : List[str] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_a : List[Any] = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
def __A ( self , a__ , a__ , a__=False ):
if return_pvalue:
_lowerCAmelCase : List[Any] = pearsonr(a__ , a__ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(a__ , a__ )[0] )}
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
return round(float(moles / volume ) * nfactor )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> float:
return round(float((pressure * volume) / (0.08_21 * moles) ) )
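# The last three helpers are rearrangements of the ideal gas law PV = nRT with
# R = 0.0821 L·atm/(mol·K). Worked example for the pressure form:
#   2 mol at 300 K in 10 L -> (2 * 0.0821 * 300) / 10 = 4.926 atm -> rounds to 5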
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 50 ) -> int:
_lowerCAmelCase : int = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
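# Counts the fill arrangements of Project Euler Problem 114 (red blocks of
# length >= 3 separated by at least one black square); e.g. a row of length 7
# admits exactly 17 arrangements.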
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import numpy as np
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
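# The logistic sigmoid 1 / (1 + e^(-x)), applied elementwise;
# e.g. for the input np.array([0.0]) it returns array([0.5]).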
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
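    # Expected output for n = 4, k = 2 -- the six 2-element combinations of 1..4:
    # 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4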
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
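

# Example run (a minimal sketch): one bucket pass per digit, least significant first.
if __name__ == "__main__":
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]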
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
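# Spot check (a minimal sketch): below 10 the odd digits 1, 3, 5, 7 and 9 are
# palindromic in both base 10 and base 2, so solution(10) == 25.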
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), F"{output[:, 1:4, 1:4]}")
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
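
# Example (matching the header note above):
#   python ./utils/get_modified_files.py utils src tests examples
# prints the matching modified .py paths space-separated, without a trailing newline.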
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the seeded random number generator.
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(F"""The generated key is : {bb84(8, seed=0)}""")
from doctest import testmod
testmod()
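
# Usage sketch (assumes qiskit with the Aer simulator installed): the same seed
# reproduces the same sifted key, since bases and prepared states are seeded too.
#   key = bb84(key_len=16, seed=0)  # a 16-character bit string such as "0101..."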
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
def __A ( self ):
_lowerCAmelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Optional[Any] = self.dummy_cond_unet
_lowerCAmelCase : Any = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , )
_lowerCAmelCase : str = self.dummy_vae
_lowerCAmelCase : Tuple = self.dummy_text_encoder
_lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : Optional[int] = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase : int = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[Any] = """A painting of a squirrel eating a burger"""
_lowerCAmelCase : List[Any] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
_lowerCAmelCase : Any = output.images
_lowerCAmelCase : str = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : int = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=a__ , )[0]
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Dict = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.dummy_cond_unet
_lowerCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a__ )
_lowerCAmelCase : List[str] = self.dummy_vae
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : str = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase : Optional[Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = """A painting of a squirrel eating a burger"""
_lowerCAmelCase : str = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Tuple = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
_lowerCAmelCase : Tuple = output.images
_lowerCAmelCase : Dict = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : List[str] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=a__ , )[0]
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Union[str, Any] = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=a__ )
assert isinstance(a__ , a__ )
assert isinstance(pipe.scheduler , a__ )
assert pipe.safety_checker is None
_lowerCAmelCase : Any = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_lowerCAmelCase : str = StableDiffusionPipeline.from_pretrained(a__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowerCAmelCase : Optional[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ):
_lowerCAmelCase : int = self.dummy_cond_unet
_lowerCAmelCase : str = PNDMScheduler(skip_prk_steps=a__ )
_lowerCAmelCase : Any = self.dummy_vae
_lowerCAmelCase : Dict = self.dummy_text_encoder
_lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_lowerCAmelCase : str = unet.half()
_lowerCAmelCase : List[str] = vae.half()
_lowerCAmelCase : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : Dict = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase : int = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : int = """A painting of a squirrel eating a burger"""
_lowerCAmelCase : Dict = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=a__ )
_lowerCAmelCase : List[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_lowerCAmelCase : Optional[int] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_lowerCAmelCase : List[Any] = 4003660346
_lowerCAmelCase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_lowerCAmelCase : int = torch.manual_seed(a__ )
_lowerCAmelCase : Dict = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_lowerCAmelCase : Optional[int] = torch.manual_seed(a__ )
_lowerCAmelCase : List[str] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_lowerCAmelCase : Optional[int] = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCAmelCase : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=a__ )
_lowerCAmelCase : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_lowerCAmelCase : Optional[Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[str] = """padme amidala taking a bath artwork, safe for work, no nudity"""
_lowerCAmelCase : Tuple = 2734971755
_lowerCAmelCase : Union[str, Any] = 7
_lowerCAmelCase : Optional[int] = torch.manual_seed(a__ )
_lowerCAmelCase : Any = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_lowerCAmelCase : Optional[Any] = torch.manual_seed(a__ )
_lowerCAmelCase : Any = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_lowerCAmelCase : Any = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_lowerCAmelCase : int = 1044355234
_lowerCAmelCase : Tuple = 12
_lowerCAmelCase : int = torch.manual_seed(a__ )
_lowerCAmelCase : str = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
_lowerCAmelCase : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_lowerCAmelCase : Optional[int] = torch.manual_seed(a__ )
_lowerCAmelCase : List[Any] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[str] = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    # Compute the MinHash of a token list; skip snippets that are too short.
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    # Tokenize on non-alphanumeric characters and drop empty tokens.
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []

        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
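

# Usage sketch (a minimal illustration; assumes a dataset with "content",
# "repo_name" and "path" columns, e.g. a small slice of a code corpus):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train[:1000]")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)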
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'

stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return F"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10,
                      distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1,
            distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10,
                    num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = F"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
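

# Quick check (a minimal sketch): with the defaults above, the channel dimension
# after the last stage is embed_dim * 2 ** (num_stages - 1) = 96 * 2 ** 3 = 768,
# i.e. Swinv2Config().hidden_size == 768.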
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
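
# With the lazy module in place, an import such as
# `from transformers.models.beit import BeitConfig` only materializes the heavy
# submodules on first attribute access (a sketch of the intended behaviour of
# _LazyModule, not additional API).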
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
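

# Usage sketch outside the test harness (assumes the ylacombe/bark-small checkpoint):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor(text="Hello world", voice_preset="en_speaker_1")
#   sorted(inputs.keys())  # includes "input_ids" and "history_prompt"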