code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ="""▁"""
UpperCAmelCase_ ={"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase_ ={
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
UpperCAmelCase_ ={
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
UpperCAmelCase_ =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Optional[Any] =VOCAB_FILES_NAMES
__a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Tuple =PRETRAINED_VOCAB_FILES_MAP
__a : Dict =["""input_ids""", """attention_mask"""]
__a : List[int] =[]
__a : List[int] =[]
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_ = None , UpperCAmelCase_=None , UpperCAmelCase_=False , **UpperCAmelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase = legacy_behaviour
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase = 1
lowerCAmelCase = len(self.sp_model )
lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_ )
}
lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowerCAmelCase = src_lang if src_lang is not None else '''eng_Latn'''
lowerCAmelCase = self.lang_code_to_id[self._src_lang]
lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCAmelCase_ ):
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __snake_case ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __snake_case ( self ):
return self._src_lang
@src_lang.setter
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = [1] * len(self.prefix_tokens )
lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowerCAmelCase = src_lang
lowerCAmelCase = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase_ )
lowerCAmelCase = tgt_lang_id
return inputs
def __snake_case ( self ):
lowerCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self , UpperCAmelCase_ ):
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __snake_case ( self , UpperCAmelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = ''''''.join(UpperCAmelCase_ ).replace(UpperCAmelCase_ , ''' ''' ).strip()
return out_string
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , '''wb''' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = "eng_Latn" , UpperCAmelCase_ = None , UpperCAmelCase_ = "fra_Latn" , **UpperCAmelCase_ , ):
lowerCAmelCase = src_lang
lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __snake_case ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
| 707
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ="""switch_transformers"""
__a : Union[str, Any] =["""past_key_values"""]
__a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_sparse_encoder_layers
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCAmelCase = num_heads
lowerCAmelCase = num_experts
lowerCAmelCase = expert_capacity
lowerCAmelCase = router_bias
lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
lowerCAmelCase = router_dtype
lowerCAmelCase = router_ignore_padding_tokens
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = add_router_probs
lowerCAmelCase = router_z_loss_coef
lowerCAmelCase = router_aux_loss_coef
lowerCAmelCase = self.feed_forward_proj.split('''-''' )
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == '''gated'''
if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = '''gelu_new'''
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 33
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Tuple ="""longformer"""
def __init__( self , UpperCAmelCase_ = 5_12 , UpperCAmelCase_ = 2 , UpperCAmelCase_ = 1 , UpperCAmelCase_ = 0 , UpperCAmelCase_ = 2 , UpperCAmelCase_ = 3_05_22 , UpperCAmelCase_ = 7_68 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 30_72 , UpperCAmelCase_ = "gelu" , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 5_12 , UpperCAmelCase_ = 2 , UpperCAmelCase_ = 0.02 , UpperCAmelCase_ = 1E-1_2 , UpperCAmelCase_ = False , **UpperCAmelCase_ , ):
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase = attention_window
lowerCAmelCase = sep_token_id
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = onnx_export
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = "default" , UpperCAmelCase_ = None ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = True
@property
def __snake_case ( self ):
if self.task == "multiple-choice":
lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def __snake_case ( self ):
lowerCAmelCase = super().outputs
if self.task == "default":
lowerCAmelCase = {0: '''batch'''}
return outputs
@property
def __snake_case ( self ):
return 1E-4
@property
def __snake_case ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = -1 , UpperCAmelCase_ = -1 , UpperCAmelCase_ = False , UpperCAmelCase_ = None , ):
lowerCAmelCase = super().generate_dummy_inputs(
preprocessor=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCAmelCase = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
lowerCAmelCase = 1
return inputs
| 708
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case )
lowerCAmelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCAmelCase = sum(single_char_strings.values() )
# one length string
lowerCAmelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCAmelCase = single_char_strings[ch]
lowerCAmelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_snake_case ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
lowerCAmelCase = sum(two_char_strings.values() )
lowerCAmelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
lowerCAmelCase = cha + cha
if sequence in two_char_strings:
lowerCAmelCase = two_char_strings[sequence]
lowerCAmelCase = int(_snake_case ) / all_sum
my_sec_sum += prob * math.loga(_snake_case )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = Counter() # type: ignore
lowerCAmelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_snake_case ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def UpperCAmelCase ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 33
| 0
|
UpperCAmelCase_ ="""Alexander Joslin"""
import operator as op
from .stack import Stack
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
lowerCAmelCase = Stack()
lowerCAmelCase = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_snake_case ) )
elif i in operators:
# RULE 2
operator_stack.push(_snake_case )
elif i == ")":
# RULE 4
lowerCAmelCase = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase = operators[opr](_snake_case , _snake_case )
operand_stack.push(_snake_case )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
UpperCAmelCase_ ="""(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Tuple =IFInpaintingSuperResolutionPipeline
__a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __snake_case ( self ):
return self._get_superresolution_dummy_components()
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith('''mps''' ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __snake_case ( self ):
self._test_save_load_local()
def __snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 33
| 0
|
import numpy as np
def UpperCAmelCase ( _snake_case ):
return 1 / (1 + np.exp(-vector ))
def UpperCAmelCase ( _snake_case ):
return vector * sigmoid(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ ={
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def UpperCAmelCase ( _snake_case , _snake_case = False ):
if not arr:
return 0
lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase = 0.0
for num in arr:
lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowerCAmelCase = max(_snake_case , _snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 711
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__a : Optional[datasets.Features] =None
__a : str ="utf-8"
__a : Optional[str] =None
__a : Optional[str] =None
__a : bool =True # deprecated
__a : Optional[int] =None # deprecated
__a : int =1_0 << 2_0 # 10MB
__a : Optional[bool] =None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__a : str =JsonConfig
def __snake_case ( self ):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowerCAmelCase = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __snake_case ( self , UpperCAmelCase_ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
lowerCAmelCase = data_files
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowerCAmelCase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def __snake_case ( self , UpperCAmelCase_ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
return pa_table
def __snake_case ( self , UpperCAmelCase_ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
# We keep only the field we are interested in
lowerCAmelCase = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase_ , (list, tuple) ):
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
else:
lowerCAmelCase = dataset
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
yield file_idx, self._cast_table(UpperCAmelCase_ )
# If the file has one json object per line
else:
with open(UpperCAmelCase_ , '''rb''' ) as f:
lowerCAmelCase = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
lowerCAmelCase = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
lowerCAmelCase = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
try:
while True:
try:
lowerCAmelCase = paj.read_json(
io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase_ )
or block_size > len(UpperCAmelCase_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON
try:
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(UpperCAmelCase_ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
batch_idx += 1
| 33
| 0
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : str ="""t5"""
__a : List[Any] =["""past_key_values"""]
__a : Optional[Any] ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=5_12 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=6 , UpperCAmelCase_=None , UpperCAmelCase_=8 , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_heads
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = self.feed_forward_proj.split('''-''' )
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == '''gated'''
if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = '''gelu_new'''
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
@property
def __snake_case ( self ):
lowerCAmelCase = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
lowerCAmelCase = '''past_encoder_sequence + sequence'''
lowerCAmelCase = {0: '''batch'''}
lowerCAmelCase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
lowerCAmelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='''inputs''' )
return common_inputs
@property
def __snake_case ( self ):
return 13
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ =logging.get_logger(__name__)
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__a : Optional[Any] ="""maskformer-swin"""
__a : Optional[int] ={
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(UpperCAmelCase_ )
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 33
| 0
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(_snake_case , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def UpperCAmelCase ( _snake_case , _snake_case ):
lowerCAmelCase = _distribute_shards(**_snake_case )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
lowerCAmelCase = _split_gen_kwargs(_snake_case , _snake_case )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def UpperCAmelCase ( _snake_case , _snake_case ):
if expected is RuntimeError:
with pytest.raises(_snake_case ):
_number_of_shards_in_gen_kwargs(_snake_case )
else:
lowerCAmelCase = _number_of_shards_in_gen_kwargs(_snake_case )
assert out == expected
| 713
|
from collections.abc import Sequence
def UpperCAmelCase ( _snake_case , _snake_case = False ):
if not arr:
return 0
lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase = 0.0
for num in arr:
lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowerCAmelCase = max(_snake_case , _snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 33
| 0
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : List[Any] =["""image_processor""", """tokenizer"""]
__a : Dict ="""AutoImageProcessor"""
__a : Tuple ="""AutoTokenizer"""
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ ):
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase_ , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = self.image_processor
lowerCAmelCase = False
def __call__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase = kwargs.pop('''images''' , UpperCAmelCase_ )
lowerCAmelCase = kwargs.pop('''text''' , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
lowerCAmelCase = args[0]
lowerCAmelCase = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCAmelCase = self.image_processor(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None:
lowerCAmelCase = self.tokenizer(UpperCAmelCase_ , **UpperCAmelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase = encodings['''input_ids''']
return inputs
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@contextmanager
def __snake_case ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer
yield
lowerCAmelCase = self.image_processor
lowerCAmelCase = False
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False , UpperCAmelCase_=None ):
if added_vocab is None:
lowerCAmelCase = self.tokenizer.get_added_vocab()
lowerCAmelCase = {}
while tokens:
lowerCAmelCase = re.search(r'''<s_(.*?)>''' , UpperCAmelCase_ , re.IGNORECASE )
if start_token is None:
break
lowerCAmelCase = start_token.group(1 )
lowerCAmelCase = re.search(rF"""</s_{key}>""" , UpperCAmelCase_ , re.IGNORECASE )
lowerCAmelCase = start_token.group()
if end_token is None:
lowerCAmelCase = tokens.replace(UpperCAmelCase_ , '''''' )
else:
lowerCAmelCase = end_token.group()
lowerCAmelCase = re.escape(UpperCAmelCase_ )
lowerCAmelCase = re.escape(UpperCAmelCase_ )
lowerCAmelCase = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCAmelCase_ , re.IGNORECASE )
if content is not None:
lowerCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCAmelCase = self.tokenajson(UpperCAmelCase_ , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if value:
if len(UpperCAmelCase_ ) == 1:
lowerCAmelCase = value[0]
lowerCAmelCase = value
else: # leaf nodes
lowerCAmelCase = []
for leaf in content.split(r'''<sep/>''' ):
lowerCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCAmelCase_ )
if len(output[key] ) == 1:
lowerCAmelCase = output[key][0]
lowerCAmelCase = tokens[tokens.find(UpperCAmelCase_ ) + len(UpperCAmelCase_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCAmelCase_ , added_vocab=UpperCAmelCase_ )
if len(UpperCAmelCase_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __snake_case ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def __snake_case ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase_ , )
return self.image_processor
| 714
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any =BertJapaneseTokenizer
__a : Optional[int] =False
__a : int =True
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowerCAmelCase = tokenizer.subword_tokenizer
lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Union[str, Any] =BertJapaneseTokenizer
__a : Optional[int] =False
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , **UpperCAmelCase_ ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowerCAmelCase = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 33
| 0
|
import torch
from diffusers import StableDiffusionPipeline
UpperCAmelCase_ ="""path-to-your-trained-model"""
UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
UpperCAmelCase_ ="""A photo of sks dog in a bucket"""
UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 715
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert"""
UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCAmelCase_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertTrue(os.path.isfile(UpperCAmelCase_ ) )
# File is cached at the same place the second time.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Using a specific revision to test the full commit hash.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' )
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ )
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' )
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = mock.Mock()
lowerCAmelCase = 5_00
lowerCAmelCase = {}
lowerCAmelCase = HTTPError
lowerCAmelCase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head:
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
# This check we did call the fake head request
mock_head.assert_called()
def __snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
def __snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' )
lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_68 )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
| 33
| 0
|
import os
import sys
UpperCAmelCase_ =os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCAmelCase_ =[
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCAmelCase ( *_snake_case , **_snake_case ):
return AutoConfig.from_pretrained(*_snake_case , **_snake_case )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCAmelCase ( *_snake_case , **_snake_case ):
return AutoTokenizer.from_pretrained(*_snake_case , **_snake_case )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCAmelCase ( *_snake_case , **_snake_case ):
return AutoModel.from_pretrained(*_snake_case , **_snake_case )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCAmelCase ( *_snake_case , **_snake_case ):
return AutoModelForCausalLM.from_pretrained(*_snake_case , **_snake_case )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCAmelCase ( *_snake_case , **_snake_case ):
return AutoModelForMaskedLM.from_pretrained(*_snake_case , **_snake_case )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCAmelCase ( *_snake_case , **_snake_case ):
return AutoModelForSequenceClassification.from_pretrained(*_snake_case , **_snake_case )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCAmelCase ( *_snake_case , **_snake_case ):
return AutoModelForQuestionAnswering.from_pretrained(*_snake_case , **_snake_case )
| 716
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ):
super().__init__(
split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = load_from_cache_file
lowerCAmelCase = file_format
lowerCAmelCase = Spark(
df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
def __snake_case ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 33
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Optional[int] ="""data2vec-vision"""
def __init__( self , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=2_24 , UpperCAmelCase_=16 , UpperCAmelCase_=3 , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=True , UpperCAmelCase_=[3, 5, 7, 11] , UpperCAmelCase_=[1, 2, 3, 6] , UpperCAmelCase_=True , UpperCAmelCase_=0.4 , UpperCAmelCase_=2_56 , UpperCAmelCase_=1 , UpperCAmelCase_=False , UpperCAmelCase_=2_55 , **UpperCAmelCase_ , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = use_mask_token
lowerCAmelCase = use_absolute_position_embeddings
lowerCAmelCase = use_relative_position_bias
lowerCAmelCase = use_shared_relative_position_bias
lowerCAmelCase = layer_scale_init_value
lowerCAmelCase = drop_path_rate
lowerCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase = out_indices
lowerCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase = use_auxiliary_head
lowerCAmelCase = auxiliary_loss_weight
lowerCAmelCase = auxiliary_channels
lowerCAmelCase = auxiliary_num_convs
lowerCAmelCase = auxiliary_concat_input
lowerCAmelCase = semantic_loss_ignore_index
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Optional[Any] =version.parse("""1.11""" )
@property
def __snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __snake_case ( self ):
return 1E-4
| 717
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase ( _snake_case = 3 ):
if isinstance(_snake_case , _snake_case ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' )
lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' )
lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case )
lowerCAmelCase = number_of_qubits
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case , _snake_case )
# simulate with 10000 shots
lowerCAmelCase = Aer.get_backend('''qasm_simulator''' )
lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 )
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 33
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=7 , UpperCAmelCase_=3 , UpperCAmelCase_=18 , UpperCAmelCase_=30 , UpperCAmelCase_=4_00 , UpperCAmelCase_=True , UpperCAmelCase_=32 , UpperCAmelCase_=True , ):
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size_divisor
lowerCAmelCase = do_rescale
def __snake_case ( self ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Dict =GLPNImageProcessor if is_vision_available() else None
def __snake_case ( self ):
lowerCAmelCase = GLPNImageProcessingTester(self )
@property
def __snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self ):
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''size_divisor''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''resample''' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , '''do_rescale''' ) )
def __snake_case ( self ):
pass
def __snake_case ( self ):
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __snake_case ( self ):
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __snake_case ( self ):
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 718
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__a : Any =1
@register_to_config
def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ):
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
lowerCAmelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
lowerCAmelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
lowerCAmelCase = std.unsqueeze(-1 )
lowerCAmelCase = -score / std
# compute
lowerCAmelCase = -1.0 / len(self.timesteps )
lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
lowerCAmelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
lowerCAmelCase = beta_t.unsqueeze(-1 )
lowerCAmelCase = -0.5 * beta_t * x
lowerCAmelCase = torch.sqrt(UpperCAmelCase_ )
lowerCAmelCase = drift - diffusion**2 * score
lowerCAmelCase = x + drift * dt
# add noise
lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype )
lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
| 33
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ ={
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 719
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __UpperCamelCase ( yaml.SafeLoader ):
'''simple docstring'''
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys]
lowerCAmelCase = Counter(UpperCAmelCase_ )
lowerCAmelCase = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ):
lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ )
self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ )
return mapping
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowerCAmelCase = full_content[1:].index('''---''' ) + 1
lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_snake_case )
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def __snake_case ( cls , UpperCAmelCase_ ):
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file:
lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(UpperCAmelCase_ )
else:
return cls()
def __snake_case ( self , UpperCAmelCase_ ):
if path.exists():
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file:
lowerCAmelCase = readme_file.read()
else:
lowerCAmelCase = None
lowerCAmelCase = self._to_readme(UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ = None ):
if readme_content is not None:
lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ )
lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def __snake_case ( cls , UpperCAmelCase_ ):
lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCAmelCase = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**UpperCAmelCase_ )
def __snake_case ( self ):
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' )
UpperCAmelCase_ ={
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
UpperCAmelCase_ =ap.parse_args()
UpperCAmelCase_ =Path(args.readme_filepath)
UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 33
| 0
|
from math import factorial
def UpperCAmelCase ( _snake_case , _snake_case ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(_snake_case ) // (factorial(_snake_case ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
| 720
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 )
lowerCAmelCase = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
for example in examples:
lowerCAmelCase = video_classifier(UpperCAmelCase_ )
self.assertEqual(
UpperCAmelCase_ , [
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
] , )
@require_torch
def __snake_case ( self ):
lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
lowerCAmelCase = pipeline(
'''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 )
lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
lowerCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def __snake_case ( self ):
pass
| 33
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ =logging.get_logger(__name__)
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__a : Optional[Any] ="""maskformer-swin"""
__a : Optional[int] ={
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(UpperCAmelCase_ )
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 721
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def __snake_case ( self , UpperCAmelCase_=0 ):
lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) )
lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# warmup pass to apply optimizations
lowerCAmelCase = pipe(**self.get_dummy_inputs() )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __snake_case ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case ( self ):
lowerCAmelCase = ort.SessionOptions()
lowerCAmelCase = False
return options
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 33
| 0
|
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase_ =[
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def UpperCAmelCase ( ):
lowerCAmelCase = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCAmelCase = g.get_repo('''huggingface/diffusers''' )
lowerCAmelCase = repo.get_issues(state='''open''' )
for issue in open_issues:
lowerCAmelCase = sorted(issue.get_comments() , key=lambda _snake_case : i.created_at , reverse=_snake_case )
lowerCAmelCase = comments[0] if len(_snake_case ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 700
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = args.pruning_method
lowerCAmelCase = args.threshold
lowerCAmelCase = args.model_name_or_path.rstrip('''/''' )
lowerCAmelCase = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
lowerCAmelCase = torch.load(os.path.join(_snake_case , '''pytorch_model.bin''' ) )
lowerCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
lowerCAmelCase = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase = TopKBinarizer.apply(_snake_case , _snake_case )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase , lowerCAmelCase = -0.1, 1.1
lowerCAmelCase = torch.sigmoid(_snake_case )
lowerCAmelCase = s * (r - l) + l
lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
lowerCAmelCase = os.path.join(
os.path.dirname(_snake_case ) , F"""bertarized_{os.path.basename(_snake_case )}""" )
if not os.path.isdir(_snake_case ):
shutil.copytree(_snake_case , _snake_case )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(_snake_case , os.path.join(_snake_case , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
UpperCAmelCase_ =argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
UpperCAmelCase_ =parser.parse_args()
main(args)
| 33
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ ={
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 701
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
UpperCAmelCase_ ={
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
UpperCAmelCase_ ={
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = set()
lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase = char
lowerCAmelCase = set(_snake_case )
return pairs
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Union[str, Any] =VOCAB_FILES_NAMES
__a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
__a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ):
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = merges_file
lowerCAmelCase = {}
lowerCAmelCase = 0
lowerCAmelCase = 1
lowerCAmelCase = 2
lowerCAmelCase = 3
self.add_from_file(UpperCAmelCase_ )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1]
lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self ):
return len(self.encoder )
def __snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self , UpperCAmelCase_ ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase = tuple(UpperCAmelCase_ )
lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase = bigram
lowerCAmelCase = []
lowerCAmelCase = 0
while i < len(UpperCAmelCase_ ):
try:
lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase = tuple(UpperCAmelCase_ )
lowerCAmelCase = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
lowerCAmelCase = get_pairs(UpperCAmelCase_ )
lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ )
lowerCAmelCase = word[:-4]
lowerCAmelCase = word
return word
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = []
lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __snake_case ( self , UpperCAmelCase_ ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self , UpperCAmelCase_ ):
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.merges_file , UpperCAmelCase_ )
return out_vocab_file, out_merge_file
def __snake_case ( self , UpperCAmelCase_ ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
try:
with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(UpperCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
lowerCAmelCase = f.readlines()
for lineTmp in lines:
lowerCAmelCase = lineTmp.strip()
lowerCAmelCase = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
lowerCAmelCase = line[:idx]
lowerCAmelCase = len(self.encoder )
| 33
| 0
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def __snake_case ( self , UpperCAmelCase_=0 ):
lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) )
lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# warmup pass to apply optimizations
lowerCAmelCase = pipe(**self.get_dummy_inputs() )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __snake_case ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case ( self ):
lowerCAmelCase = ort.SessionOptions()
lowerCAmelCase = False
return options
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 702
|
from __future__ import annotations
from typing import Generic, TypeVar
UpperCAmelCase_ =TypeVar("""T""")
class __UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ ):
lowerCAmelCase = data
lowerCAmelCase = self
lowerCAmelCase = 0
class __UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
# map from node name to the node object
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ ):
# create a new set with x as its member
lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
# find the set x belongs to (with path-compression)
lowerCAmelCase = self.map[data]
if elem_ref != elem_ref.parent:
lowerCAmelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
# helper function for union operation
if nodea.rank > nodea.rank:
lowerCAmelCase = nodea
else:
lowerCAmelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
# merge 2 disjoint sets
self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) )
class __UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
# connections: map from the node to the neighbouring nodes (with weights)
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ ):
# add a node ONLY if its not present in the graph
if node not in self.connections:
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
# add an edge with the given weight
self.add_node(UpperCAmelCase_ )
self.add_node(UpperCAmelCase_ )
lowerCAmelCase = weight
lowerCAmelCase = weight
def __snake_case ( self ):
lowerCAmelCase = []
lowerCAmelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda UpperCAmelCase_ : x[2] )
# creating the disjoint set
lowerCAmelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(UpperCAmelCase_ )
# MST generation
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index]
index += 1
lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ )
lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ )
return graph
| 33
| 0
|
from collections import deque
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = len(_snake_case )
lowerCAmelCase = deque()
lowerCAmelCase = [False for _ in range(_snake_case )]
lowerCAmelCase = [-1 for _ in range(_snake_case )]
lowerCAmelCase = index_of[:]
def strong_connect(_snake_case , _snake_case , _snake_case ):
lowerCAmelCase = index # the number when this node is seen
lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(_snake_case )
lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
lowerCAmelCase = strong_connect(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCAmelCase = []
lowerCAmelCase = stack.pop()
lowerCAmelCase = False
component.append(_snake_case )
while w != v:
lowerCAmelCase = stack.pop()
lowerCAmelCase = False
component.append(_snake_case )
components.append(_snake_case )
return index
lowerCAmelCase = []
for v in range(_snake_case ):
if index_of[v] == -1:
strong_connect(_snake_case , 0 , _snake_case )
return components
def UpperCAmelCase ( _snake_case , _snake_case ):
lowerCAmelCase = [[] for _ in range(_snake_case )]
for u, v in edges:
g[u].append(_snake_case )
return g
if __name__ == "__main__":
# Test
UpperCAmelCase_ =7
UpperCAmelCase_ =[0, 0, 1, 2, 3, 3, 4, 4, 6]
UpperCAmelCase_ =[1, 3, 2, 0, 1, 4, 5, 6, 5]
UpperCAmelCase_ =[(u, v) for u, v in zip(source, target)]
UpperCAmelCase_ =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 703
|
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
def count_of_possible_combinations(_snake_case ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_snake_case )
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
def count_of_possible_combinations_with_dp_array(
_snake_case , _snake_case ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , _snake_case )
for item in array )
lowerCAmelCase = answer
return answer
lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_snake_case , _snake_case )
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
lowerCAmelCase = [0] * (target + 1)
lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(_snake_case ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ =3
UpperCAmelCase_ =5
UpperCAmelCase_ =[1, 2, 5]
print(combination_sum_iv(n, array, target))
| 33
| 0
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=30 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=32 , UpperCAmelCase_=5 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=10 , UpperCAmelCase_=0.02 , UpperCAmelCase_=None , UpperCAmelCase_=2 , ):
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = scope
lowerCAmelCase = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = num_patches + 1
def __snake_case ( self ):
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = ViTModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = ViTForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = self.type_sequence_label_size
lowerCAmelCase = ViTForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = ViTForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self ):
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : List[str] =(
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__a : int =(
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
__a : List[Any] =True
__a : List[Any] =False
__a : List[Any] =False
__a : Any =False
def __snake_case ( self ):
lowerCAmelCase = ViTModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) )
def __snake_case ( self ):
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase_ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def __snake_case ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = ViTModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def UpperCAmelCase ( ):
lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
lowerCAmelCase = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(UpperCAmelCase_ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**UpperCAmelCase_ )
# verify the logits
lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
lowerCAmelCase = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def __snake_case ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
lowerCAmelCase = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(UpperCAmelCase_ )
lowerCAmelCase = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_80 )
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' )
lowerCAmelCase = inputs.pixel_values.to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ )
# verify the logits
lowerCAmelCase = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
lowerCAmelCase = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __snake_case ( self ):
lowerCAmelCase = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' )
lowerCAmelCase = inputs.pixel_values.to(UpperCAmelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase_ )
| 704
|
import torch
from diffusers import StableDiffusionPipeline
UpperCAmelCase_ ="""path-to-your-trained-model"""
UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
UpperCAmelCase_ ="""A photo of sks dog in a bucket"""
UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 33
| 0
|
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : List[Any] =StableUnCLIPPipeline
__a : List[str] =TEXT_TO_IMAGE_PARAMS
__a : Any =TEXT_TO_IMAGE_BATCH_PARAMS
__a : Any =TEXT_TO_IMAGE_IMAGE_PARAMS
__a : str =TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__a : Optional[int] =False
def __snake_case ( self ):
lowerCAmelCase = 32
lowerCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=UpperCAmelCase_ , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCAmelCase_ , num_layers=1 , )
torch.manual_seed(0 )
lowerCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=UpperCAmelCase_ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase_ )
lowerCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase_ , layers_per_block=1 , upcast_attention=UpperCAmelCase_ , use_linear_projection=UpperCAmelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL()
lowerCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith('''mps''' ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self ):
lowerCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase_ )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
lowerCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase = pipe('''anime turle''' , generator=UpperCAmelCase_ , output_type='''np''' )
lowerCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
lowerCAmelCase = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ ={
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Union[str, Any] ="""bloom"""
__a : Any =["""past_key_values"""]
__a : str ={
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self , UpperCAmelCase_=25_08_80 , UpperCAmelCase_=64 , UpperCAmelCase_=2 , UpperCAmelCase_=8 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=0.02 , UpperCAmelCase_=True , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=1 , UpperCAmelCase_=False , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
lowerCAmelCase = kwargs.pop('''n_embed''' , UpperCAmelCase_ )
lowerCAmelCase = hidden_size if n_embed is None else n_embed
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = use_cache
lowerCAmelCase = pretraining_tp
lowerCAmelCase = apply_residual_connection_post_layernorm
lowerCAmelCase = hidden_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
lowerCAmelCase = slow_but_exact
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : List[Any] =version.parse("""1.12""" )
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = "default" , UpperCAmelCase_ = None , UpperCAmelCase_ = False , ):
super().__init__(UpperCAmelCase_ , task=UpperCAmelCase_ , patching_specs=UpperCAmelCase_ , use_past=UpperCAmelCase_ )
if not getattr(self._config , '''pad_token_id''' , UpperCAmelCase_ ):
# TODO: how to do that better?
lowerCAmelCase = 0
@property
def __snake_case ( self ):
lowerCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='''inputs''' , inverted_values_shape=UpperCAmelCase_ )
lowerCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __snake_case ( self ):
return self._config.n_layer
@property
def __snake_case ( self ):
return self._config.n_head
@property
def __snake_case ( self ):
return 1E-3
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = -1 , UpperCAmelCase_ = -1 , UpperCAmelCase_ = False , UpperCAmelCase_ = None , ):
lowerCAmelCase = super(UpperCAmelCase_ , self ).generate_dummy_inputs(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_ )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCAmelCase , lowerCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCAmelCase = seqlen + 2
lowerCAmelCase = self._config.hidden_size // self.num_attention_heads
lowerCAmelCase = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowerCAmelCase = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowerCAmelCase = [
(torch.zeros(UpperCAmelCase_ ), torch.zeros(UpperCAmelCase_ )) for _ in range(self.num_layers )
]
lowerCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
lowerCAmelCase = ordered_inputs['''attention_mask'''].dtype
lowerCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_ )] , dim=1 )
return ordered_inputs
@property
def __snake_case ( self ):
return 13
| 706
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = 8
# DPR tok
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase = {'''unk_token''': '''<unk>'''}
lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase_ ) )
def __snake_case ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __snake_case ( self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __snake_case ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
lowerCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase = dataset
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' )
lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , )
return retriever
def __snake_case ( self ):
lowerCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) )
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __snake_case ( self ):
import torch
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
lowerCAmelCase = retriever(
UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __snake_case ( self ):
lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ )
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
self.assertEqual(
len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
| 33
| 0
|
def UpperCAmelCase ( _snake_case , _snake_case ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
lowerCAmelCase = str(bin(_snake_case ) )[2:] # remove the leading "0b"
lowerCAmelCase = str(bin(_snake_case ) )[2:] # remove the leading "0b"
lowerCAmelCase = max(len(_snake_case ) , len(_snake_case ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(_snake_case ) , b_binary.zfill(_snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ="""switch_transformers"""
__a : Union[str, Any] =["""past_key_values"""]
__a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_sparse_encoder_layers
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCAmelCase = num_heads
lowerCAmelCase = num_experts
lowerCAmelCase = expert_capacity
lowerCAmelCase = router_bias
lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
lowerCAmelCase = router_dtype
lowerCAmelCase = router_ignore_padding_tokens
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = add_router_probs
lowerCAmelCase = router_z_loss_coef
lowerCAmelCase = router_aux_loss_coef
lowerCAmelCase = self.feed_forward_proj.split('''-''' )
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == '''gated'''
if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = '''gelu_new'''
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 33
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = SwinConfig()
lowerCAmelCase = swin_name.split('''_''' )
lowerCAmelCase = name_split[1]
lowerCAmelCase = int(name_split[4] )
lowerCAmelCase = int(name_split[3][-1] )
if model_size == "tiny":
lowerCAmelCase = 96
lowerCAmelCase = (2, 2, 6, 2)
lowerCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
lowerCAmelCase = 96
lowerCAmelCase = (2, 2, 18, 2)
lowerCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
lowerCAmelCase = 128
lowerCAmelCase = (2, 2, 18, 2)
lowerCAmelCase = (4, 8, 16, 32)
else:
lowerCAmelCase = 192
lowerCAmelCase = (2, 2, 18, 2)
lowerCAmelCase = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCAmelCase = 21841
else:
lowerCAmelCase = 1000
lowerCAmelCase = '''huggingface/label-files'''
lowerCAmelCase = '''imagenet-1k-id2label.json'''
lowerCAmelCase = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase = {int(_snake_case ): v for k, v in idalabel.items()}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
lowerCAmelCase = img_size
lowerCAmelCase = num_classes
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
return config
def UpperCAmelCase ( _snake_case ):
if "patch_embed.proj" in name:
lowerCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCAmelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowerCAmelCase = '''encoder.''' + name
if "attn.proj" in name:
lowerCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCAmelCase = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
lowerCAmelCase = '''layernorm.weight'''
if name == "norm.bias":
lowerCAmelCase = '''layernorm.bias'''
if "head" in name:
lowerCAmelCase = name.replace('''head''' , '''classifier''' )
else:
lowerCAmelCase = '''swin.''' + name
return name
def UpperCAmelCase ( _snake_case , _snake_case ):
for key in orig_state_dict.copy().keys():
lowerCAmelCase = orig_state_dict.pop(_snake_case )
if "mask" in key:
continue
elif "qkv" in key:
lowerCAmelCase = key.split('''.''' )
lowerCAmelCase = int(key_split[1] )
lowerCAmelCase = int(key_split[3] )
lowerCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase = val[:dim, :]
lowerCAmelCase = val[
dim : dim * 2, :
]
lowerCAmelCase = val[-dim:, :]
else:
lowerCAmelCase = val[
:dim
]
lowerCAmelCase = val[
dim : dim * 2
]
lowerCAmelCase = val[
-dim:
]
else:
lowerCAmelCase = val
return orig_state_dict
def UpperCAmelCase ( _snake_case , _snake_case ):
lowerCAmelCase = timm.create_model(_snake_case , pretrained=_snake_case )
timm_model.eval()
lowerCAmelCase = get_swin_config(_snake_case )
lowerCAmelCase = SwinForImageClassification(_snake_case )
model.eval()
lowerCAmelCase = convert_state_dict(timm_model.state_dict() , _snake_case )
model.load_state_dict(_snake_case )
lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
lowerCAmelCase = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
lowerCAmelCase = image_processor(images=_snake_case , return_tensors='''pt''' )
lowerCAmelCase = timm_model(inputs['''pixel_values'''] )
lowerCAmelCase = model(**_snake_case ).logits
assert torch.allclose(_snake_case , _snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase_ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCAmelCase_ =parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 708
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case )
lowerCAmelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCAmelCase = sum(single_char_strings.values() )
# one length string
lowerCAmelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCAmelCase = single_char_strings[ch]
lowerCAmelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_snake_case ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
lowerCAmelCase = sum(two_char_strings.values() )
lowerCAmelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
lowerCAmelCase = cha + cha
if sequence in two_char_strings:
lowerCAmelCase = two_char_strings[sequence]
lowerCAmelCase = int(_snake_case ) / all_sum
my_sec_sum += prob * math.loga(_snake_case )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = Counter() # type: ignore
lowerCAmelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_snake_case ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def UpperCAmelCase ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 33
| 0
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase , lowerCAmelCase = emb.weight.shape
lowerCAmelCase = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
lowerCAmelCase = emb.weight.data
return lin_layer
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = torch.load(_snake_case , map_location='''cpu''' )
lowerCAmelCase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
lowerCAmelCase = mam_aaa['''model''']
remove_ignore_keys_(_snake_case )
lowerCAmelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCAmelCase = MaMaaaConfig(
vocab_size=_snake_case , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
lowerCAmelCase = state_dict['''decoder.embed_tokens.weight''']
lowerCAmelCase = MaMaaaForConditionalGeneration(_snake_case )
model.model.load_state_dict(_snake_case , strict=_snake_case )
lowerCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCAmelCase_ =argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
UpperCAmelCase_ =parser.parse_args()
UpperCAmelCase_ =convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Tuple =IFInpaintingSuperResolutionPipeline
__a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __snake_case ( self ):
return self._get_superresolution_dummy_components()
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith('''mps''' ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __snake_case ( self ):
self._test_save_load_local()
def __snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 33
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__UpperCAmelCase )
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : str =field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__a : ClassVar[Features] =Features({"""text""": Value("""string""" )} )
__a : ClassVar[Features] =Features({} )
__a : str ="text"
@property
def __snake_case ( self ):
return {self.text_column: "text"}
| 710
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ ={
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''ylacombe/bark-small'''
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = '''en_speaker_1'''
lowerCAmelCase = '''This is a test string'''
lowerCAmelCase = '''speaker_embeddings_path.json'''
lowerCAmelCase = '''speaker_embeddings'''
def __snake_case ( self , **UpperCAmelCase_ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __snake_case ( self ):
lowerCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCAmelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __snake_case ( self ):
lowerCAmelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowerCAmelCase = 35
lowerCAmelCase = 2
lowerCAmelCase = 8
lowerCAmelCase = {
'''semantic_prompt''': np.ones(UpperCAmelCase_ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowerCAmelCase = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
lowerCAmelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCAmelCase = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
lowerCAmelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def __snake_case ( self ):
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = BarkProcessor(tokenizer=UpperCAmelCase_ )
lowerCAmelCase = processor(text=self.input_string )
lowerCAmelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 711
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__a : Optional[datasets.Features] =None
__a : str ="utf-8"
__a : Optional[str] =None
__a : Optional[str] =None
__a : bool =True # deprecated
__a : Optional[int] =None # deprecated
__a : int =1_0 << 2_0 # 10MB
__a : Optional[bool] =None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__a : str =JsonConfig
def __snake_case ( self ):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowerCAmelCase = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __snake_case ( self , UpperCAmelCase_ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
lowerCAmelCase = data_files
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowerCAmelCase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def __snake_case ( self , UpperCAmelCase_ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
return pa_table
def __snake_case ( self , UpperCAmelCase_ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
# We keep only the field we are interested in
lowerCAmelCase = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase_ , (list, tuple) ):
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
else:
lowerCAmelCase = dataset
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
yield file_idx, self._cast_table(UpperCAmelCase_ )
# If the file has one json object per line
else:
with open(UpperCAmelCase_ , '''rb''' ) as f:
lowerCAmelCase = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
lowerCAmelCase = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
lowerCAmelCase = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
try:
while True:
try:
lowerCAmelCase = paj.read_json(
io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase_ )
or block_size > len(UpperCAmelCase_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON
try:
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(UpperCAmelCase_ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
batch_idx += 1
| 33
| 0
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=0.2 , UpperCAmelCase_=0.2 ):
lowerCAmelCase = bp_numa
lowerCAmelCase = bp_numa
lowerCAmelCase = bp_numa
lowerCAmelCase = conva_get[:2]
lowerCAmelCase = conva_get[2]
lowerCAmelCase = size_pa
lowerCAmelCase = rate_w
lowerCAmelCase = rate_t
lowerCAmelCase = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase = -2 * np.random.rand(self.conva[1] ) + 1
lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1
lowerCAmelCase = -2 * np.random.rand(self.num_bpa ) + 1
def __snake_case ( self , UpperCAmelCase_ ):
# save model dict with pickle
lowerCAmelCase = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(UpperCAmelCase_ , '''wb''' ) as f:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
print(F"""Model saved: {save_path}""" )
@classmethod
def __snake_case ( cls , UpperCAmelCase_ ):
# read saved model
with open(UpperCAmelCase_ , '''rb''' ) as f:
lowerCAmelCase = pickle.load(UpperCAmelCase_ ) # noqa: S301
lowerCAmelCase = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
lowerCAmelCase = model_dic.get('''size_pooling1''' )
lowerCAmelCase = model_dic.get('''num_bp1''' )
lowerCAmelCase = model_dic.get('''num_bp2''' )
lowerCAmelCase = model_dic.get('''num_bp3''' )
lowerCAmelCase = model_dic.get('''rate_weight''' )
lowerCAmelCase = model_dic.get('''rate_thre''' )
# create model instance
lowerCAmelCase = CNN(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# modify model parameter
lowerCAmelCase = model_dic.get('''w_conv1''' )
lowerCAmelCase = model_dic.get('''wkj''' )
lowerCAmelCase = model_dic.get('''vji''' )
lowerCAmelCase = model_dic.get('''thre_conv1''' )
lowerCAmelCase = model_dic.get('''thre_bp2''' )
lowerCAmelCase = model_dic.get('''thre_bp3''' )
return conv_ins
def __snake_case ( self , UpperCAmelCase_ ):
return 1 / (1 + np.exp(-1 * x ))
def __snake_case ( self , UpperCAmelCase_ ):
return round(UpperCAmelCase_ , 3 )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
# convolution process
lowerCAmelCase = convs[0]
lowerCAmelCase = convs[1]
lowerCAmelCase = np.shape(UpperCAmelCase_ )[0]
# get the data slice of original image data, data_focus
lowerCAmelCase = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_ ):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_ ):
lowerCAmelCase = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCAmelCase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCAmelCase = []
lowerCAmelCase = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(UpperCAmelCase_ ):
lowerCAmelCase = []
for i_focus in range(len(UpperCAmelCase_ ) ):
lowerCAmelCase = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCAmelCase_ ) )
lowerCAmelCase = np.asmatrix(UpperCAmelCase_ ).reshape(
UpperCAmelCase_ , UpperCAmelCase_ )
data_featuremap.append(UpperCAmelCase_ )
# expanding the data slice to One dimenssion
lowerCAmelCase = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCAmelCase_ ) )
lowerCAmelCase = np.asarray(UpperCAmelCase_ )
return focus_list, data_featuremap
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="average_pool" ):
# pooling process
lowerCAmelCase = len(featuremaps[0] )
lowerCAmelCase = int(size_map / size_pooling )
lowerCAmelCase = []
for i_map in range(len(UpperCAmelCase_ ) ):
lowerCAmelCase = featuremaps[i_map]
lowerCAmelCase = []
for i_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
for j_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCAmelCase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCAmelCase_ ) )
lowerCAmelCase = np.asmatrix(UpperCAmelCase_ ).reshape(UpperCAmelCase_ , UpperCAmelCase_ )
featuremap_pooled.append(UpperCAmelCase_ )
return featuremap_pooled
def __snake_case ( self , UpperCAmelCase_ ):
# expanding three dimension data to one dimension list
lowerCAmelCase = []
for i in range(len(UpperCAmelCase_ ) ):
lowerCAmelCase = np.shape(data[i] )
lowerCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCAmelCase = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCAmelCase_ )
lowerCAmelCase = np.asarray(UpperCAmelCase_ )
return data_expanded
def __snake_case ( self , UpperCAmelCase_ ):
# expanding matrix to one dimension list
lowerCAmelCase = np.asarray(UpperCAmelCase_ )
lowerCAmelCase = np.shape(UpperCAmelCase_ )
lowerCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = []
lowerCAmelCase = 0
for i_map in range(UpperCAmelCase_ ):
lowerCAmelCase = np.ones((size_map, size_map) )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
for j in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = pd_pool[
i_pool
]
lowerCAmelCase = i_pool + 1
lowerCAmelCase = np.multiply(
UpperCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(UpperCAmelCase_ )
return pd_all
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=bool ):
# model traning
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(UpperCAmelCase_ )) )
print((''' - - Shape: Teach_Data ''', np.shape(UpperCAmelCase_ )) )
lowerCAmelCase = 0
lowerCAmelCase = []
lowerCAmelCase = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
lowerCAmelCase = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(UpperCAmelCase_ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCAmelCase = np.asmatrix(datas_train[p] )
lowerCAmelCase = np.asarray(datas_teach[p] )
lowerCAmelCase , lowerCAmelCase = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga )
lowerCAmelCase = np.shape(UpperCAmelCase_ )
lowerCAmelCase = self._expand(UpperCAmelCase_ )
lowerCAmelCase = data_bp_input
lowerCAmelCase = np.dot(UpperCAmelCase_ , self.vji.T ) - self.thre_bpa
lowerCAmelCase = self.sig(UpperCAmelCase_ )
lowerCAmelCase = np.dot(UpperCAmelCase_ , self.wkj.T ) - self.thre_bpa
lowerCAmelCase = self.sig(UpperCAmelCase_ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCAmelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCAmelCase_ , (1 - bp_outa) ) )
lowerCAmelCase = np.multiply(
np.dot(UpperCAmelCase_ , self.wkj ) , np.multiply(UpperCAmelCase_ , (1 - bp_outa) ) )
lowerCAmelCase = np.dot(UpperCAmelCase_ , self.vji )
lowerCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCAmelCase = pd_conva_pooled.T.getA().tolist()
lowerCAmelCase = self._calculate_gradient_from_pool(
UpperCAmelCase_ , UpperCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCAmelCase = self._expand_mat(pd_conva_all[k_conv] )
lowerCAmelCase = self.rate_weight * np.dot(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCAmelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
lowerCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCAmelCase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCAmelCase = rp + 1
lowerCAmelCase = error_count / patterns
all_mse.append(UpperCAmelCase_ )
def draw_error():
lowerCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(UpperCAmelCase_ , '''+-''' )
plt.plot(UpperCAmelCase_ , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(UpperCAmelCase_ , alpha=0.5 )
plt.show()
print('''------------------Training Complished---------------------''' )
print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def __snake_case ( self , UpperCAmelCase_ ):
# model predict
lowerCAmelCase = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(UpperCAmelCase_ )) )
for p in range(len(UpperCAmelCase_ ) ):
lowerCAmelCase = np.asmatrix(datas_test[p] )
lowerCAmelCase , lowerCAmelCase = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga )
lowerCAmelCase = self._expand(UpperCAmelCase_ )
lowerCAmelCase = data_bp_input
lowerCAmelCase = bp_outa * self.vji.T - self.thre_bpa
lowerCAmelCase = self.sig(UpperCAmelCase_ )
lowerCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
lowerCAmelCase = self.sig(UpperCAmelCase_ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCAmelCase = [list(map(self.do_round , UpperCAmelCase_ ) ) for each in produce_out]
return np.asarray(UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
# return the data of image after convoluting process so we can check it out
lowerCAmelCase = np.asmatrix(UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase = self.pooling(UpperCAmelCase_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ =logging.get_logger(__name__)
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__a : Optional[Any] ="""maskformer-swin"""
__a : Optional[int] ={
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(UpperCAmelCase_ )
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 33
| 0
|
from __future__ import annotations
def UpperCAmelCase ( _snake_case , _snake_case = None , _snake_case = None ):
if start is None:
lowerCAmelCase = 0
if end is None:
lowerCAmelCase = len(_snake_case ) - 1
if start >= end:
return
lowerCAmelCase = (start + end) // 2
slowsort(_snake_case , _snake_case , _snake_case )
slowsort(_snake_case , mid + 1 , _snake_case )
if sequence[end] < sequence[mid]:
lowerCAmelCase , lowerCAmelCase = sequence[mid], sequence[end]
slowsort(_snake_case , _snake_case , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 713
|
from collections.abc import Sequence
def UpperCAmelCase ( _snake_case , _snake_case = False ):
if not arr:
return 0
lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase = 0.0
for num in arr:
lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowerCAmelCase = max(_snake_case , _snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 33
| 0
|
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase_ =logging.get_logger(__name__)
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 714
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any =BertJapaneseTokenizer
__a : Optional[int] =False
__a : int =True
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowerCAmelCase = tokenizer.subword_tokenizer
lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Union[str, Any] =BertJapaneseTokenizer
__a : Optional[int] =False
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , **UpperCAmelCase_ ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowerCAmelCase = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 33
| 0
|
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : str =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Dict =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : List[Any] =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Optional[int] =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : List[Any] =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Any =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Dict =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Union[str, Any] =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : List[str] =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Any =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Optional[Any] =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Tuple =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
class __UpperCamelCase ( metaclass=__UpperCAmelCase ):
'''simple docstring'''
__a : Optional[int] =["""flax"""]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
@classmethod
def __snake_case ( cls , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(cls , ['''flax'''] )
| 715
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert"""
UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCAmelCase_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertTrue(os.path.isfile(UpperCAmelCase_ ) )
# File is cached at the same place the second time.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Using a specific revision to test the full commit hash.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' )
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ )
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' )
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = mock.Mock()
lowerCAmelCase = 5_00
lowerCAmelCase = {}
lowerCAmelCase = HTTPError
lowerCAmelCase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head:
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
# This check we did call the fake head request
mock_head.assert_called()
def __snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
def __snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' )
lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_68 )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
| 33
| 0
|
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _snake_case , _snake_case ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = []
lowerCAmelCase = 11
lowerCAmelCase = int('''1''' + '''0''' * digit_len )
for num in range(_snake_case , _snake_case ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_snake_case , _snake_case ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
lowerCAmelCase = 10
return solutions
def UpperCAmelCase ( _snake_case = 2 ):
lowerCAmelCase = 1.0
for fraction in fraction_list(_snake_case ):
lowerCAmelCase = Fraction(_snake_case )
result *= frac.denominator / frac.numerator
return int(_snake_case )
if __name__ == "__main__":
print(solution())
| 716
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ):
super().__init__(
split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = load_from_cache_file
lowerCAmelCase = file_format
lowerCAmelCase = Spark(
df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
def __snake_case ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 33
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCAmelCase_ , )
assert hasattr(self , '''env''' )
def __snake_case ( self , UpperCAmelCase_ ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
lowerCAmelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
lowerCAmelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
lowerCAmelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=UpperCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase_ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=UpperCAmelCase_ , py_version='''py36''' , )
def __snake_case ( self , UpperCAmelCase_ ):
TrainingJobAnalytics(UpperCAmelCase_ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __snake_case ( self , UpperCAmelCase_ ):
# create estimator
lowerCAmelCase = self.create_estimator(UpperCAmelCase_ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowerCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCAmelCase_ )
| 717
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase ( _snake_case = 3 ):
if isinstance(_snake_case , _snake_case ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' )
lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' )
lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case )
lowerCAmelCase = number_of_qubits
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case , _snake_case )
# simulate with 10000 shots
lowerCAmelCase = Aer.get_backend('''qasm_simulator''' )
lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 )
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 33
| 0
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = 8
# DPR tok
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase = {'''unk_token''': '''<unk>'''}
lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase_ ) )
def __snake_case ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __snake_case ( self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __snake_case ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
lowerCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase = dataset
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' )
lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , )
return retriever
def __snake_case ( self ):
lowerCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) )
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __snake_case ( self ):
import torch
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
lowerCAmelCase = retriever(
UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __snake_case ( self ):
lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ )
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
self.assertEqual(
len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
| 718
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__a : Any =1
@register_to_config
def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ):
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
lowerCAmelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
lowerCAmelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
lowerCAmelCase = std.unsqueeze(-1 )
lowerCAmelCase = -score / std
# compute
lowerCAmelCase = -1.0 / len(self.timesteps )
lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
lowerCAmelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
lowerCAmelCase = beta_t.unsqueeze(-1 )
lowerCAmelCase = -0.5 * beta_t * x
lowerCAmelCase = torch.sqrt(UpperCAmelCase_ )
lowerCAmelCase = drift - diffusion**2 * score
lowerCAmelCase = x + drift * dt
# add noise
lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype )
lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
| 33
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
lowerCAmelCase = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(self.tmpdirname , UpperCAmelCase_ )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' )
# load decoder from hub
lowerCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder'''
def __snake_case ( self , **UpperCAmelCase_ ):
lowerCAmelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(UpperCAmelCase_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def __snake_case ( self , **UpperCAmelCase_ ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def __snake_case ( self , **UpperCAmelCase_ ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **UpperCAmelCase_ )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __snake_case ( self ):
lowerCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(UpperCAmelCase_ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=UpperCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __snake_case ( self ):
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
lowerCAmelCase = floats_list((3, 10_00) )
lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='''np''' )
lowerCAmelCase = processor(UpperCAmelCase_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __snake_case ( self ):
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
lowerCAmelCase = '''This is a test string'''
lowerCAmelCase = processor(text=UpperCAmelCase_ )
lowerCAmelCase = tokenizer(UpperCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self , UpperCAmelCase_=(2, 10, 16) , UpperCAmelCase_=77 ):
np.random.seed(UpperCAmelCase_ )
return np.random.rand(*UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
lowerCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCAmelCase = processor.decode(UpperCAmelCase_ )
lowerCAmelCase = decoder.decode_beams(UpperCAmelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
lowerCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCAmelCase = processor.batch_decode(UpperCAmelCase_ )
else:
with get_context(UpperCAmelCase_ ).Pool() as pool:
lowerCAmelCase = processor.batch_decode(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = list(UpperCAmelCase_ )
with get_context('''fork''' ).Pool() as p:
lowerCAmelCase = decoder.decode_beams_batch(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(UpperCAmelCase_ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(UpperCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(UpperCAmelCase_ , decoded_processor.lm_score )
def __snake_case ( self ):
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
lowerCAmelCase = self._get_dummy_logits()
lowerCAmelCase = 15
lowerCAmelCase = -20.0
lowerCAmelCase = -4.0
lowerCAmelCase = processor.batch_decode(
UpperCAmelCase_ , beam_width=UpperCAmelCase_ , beam_prune_logp=UpperCAmelCase_ , token_min_logp=UpperCAmelCase_ , )
lowerCAmelCase = decoded_processor_out.text
lowerCAmelCase = list(UpperCAmelCase_ )
with get_context('''fork''' ).Pool() as pool:
lowerCAmelCase = decoder.decode_beams_batch(
UpperCAmelCase_ , UpperCAmelCase_ , beam_width=UpperCAmelCase_ , beam_prune_logp=UpperCAmelCase_ , token_min_logp=UpperCAmelCase_ , )
lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
lowerCAmelCase = [d[0][2] for d in decoded_decoder_out]
lowerCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , UpperCAmelCase_ )
self.assertTrue(np.array_equal(UpperCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , UpperCAmelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(UpperCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , UpperCAmelCase_ , atol=1E-3 ) )
def __snake_case ( self ):
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
lowerCAmelCase = self._get_dummy_logits()
lowerCAmelCase = 2.0
lowerCAmelCase = 5.0
lowerCAmelCase = -20.0
lowerCAmelCase = True
lowerCAmelCase = processor.batch_decode(
UpperCAmelCase_ , alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , unk_score_offset=UpperCAmelCase_ , lm_score_boundary=UpperCAmelCase_ , )
lowerCAmelCase = decoded_processor_out.text
lowerCAmelCase = list(UpperCAmelCase_ )
decoder.reset_params(
alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , unk_score_offset=UpperCAmelCase_ , lm_score_boundary=UpperCAmelCase_ , )
with get_context('''fork''' ).Pool() as pool:
lowerCAmelCase = decoder.decode_beams_batch(
UpperCAmelCase_ , UpperCAmelCase_ , )
lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , UpperCAmelCase_ )
lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCAmelCase = os.listdir(UpperCAmelCase_ )
lowerCAmelCase = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(UpperCAmelCase_ )
lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
lowerCAmelCase = os.listdir(UpperCAmelCase_ )
lowerCAmelCase = os.listdir(UpperCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase = floats_list((3, 10_00) )
lowerCAmelCase = processor_wavaveca(UpperCAmelCase_ , return_tensors='''np''' )
lowerCAmelCase = processor_auto(UpperCAmelCase_ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowerCAmelCase = self._get_dummy_logits()
lowerCAmelCase = processor_wavaveca.batch_decode(UpperCAmelCase_ )
lowerCAmelCase = processor_auto.batch_decode(UpperCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __snake_case ( self ):
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_decoder()
lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [d[key] for d in offsets]
return retrieved_list
def __snake_case ( self ):
lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase = self._get_dummy_logits()[0]
lowerCAmelCase = processor.decode(UpperCAmelCase_ , output_word_offsets=UpperCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __snake_case ( self ):
lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
lowerCAmelCase = self._get_dummy_logits()
lowerCAmelCase = processor.batch_decode(UpperCAmelCase_ , output_word_offsets=UpperCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(UpperCAmelCase_ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __snake_case ( self ):
import torch
lowerCAmelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=UpperCAmelCase_ )
lowerCAmelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
lowerCAmelCase = iter(UpperCAmelCase_ )
lowerCAmelCase = next(UpperCAmelCase_ )
lowerCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
lowerCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCAmelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase_ ).logits.cpu().numpy()
lowerCAmelCase = processor.decode(logits[0] , output_word_offsets=UpperCAmelCase_ )
lowerCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCAmelCase = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
lowerCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(UpperCAmelCase_ , '''word''' ) ) , UpperCAmelCase_ )
self.assertEqual(''' '''.join(self.get_from_offsets(UpperCAmelCase_ , '''word''' ) ) , output.text )
# output times
lowerCAmelCase = torch.tensor(self.get_from_offsets(UpperCAmelCase_ , '''start_time''' ) )
lowerCAmelCase = torch.tensor(self.get_from_offsets(UpperCAmelCase_ , '''end_time''' ) )
# fmt: off
lowerCAmelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
lowerCAmelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=0.01 ) )
| 719
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __UpperCamelCase ( yaml.SafeLoader ):
'''simple docstring'''
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys]
lowerCAmelCase = Counter(UpperCAmelCase_ )
lowerCAmelCase = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ):
lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ )
self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ )
return mapping
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowerCAmelCase = full_content[1:].index('''---''' ) + 1
lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_snake_case )
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def __snake_case ( cls , UpperCAmelCase_ ):
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file:
lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(UpperCAmelCase_ )
else:
return cls()
def __snake_case ( self , UpperCAmelCase_ ):
if path.exists():
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file:
lowerCAmelCase = readme_file.read()
else:
lowerCAmelCase = None
lowerCAmelCase = self._to_readme(UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ = None ):
if readme_content is not None:
lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ )
lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def __snake_case ( cls , UpperCAmelCase_ ):
lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCAmelCase = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**UpperCAmelCase_ )
def __snake_case ( self ):
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' )
UpperCAmelCase_ ={
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
UpperCAmelCase_ =ap.parse_args()
UpperCAmelCase_ =Path(args.readme_filepath)
UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 33
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : List[Any] ="""trajectory_transformer"""
__a : str =["""past_key_values"""]
__a : List[Any] ={
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , UpperCAmelCase_=1_00 , UpperCAmelCase_=5 , UpperCAmelCase_=1 , UpperCAmelCase_=1 , UpperCAmelCase_=2_49 , UpperCAmelCase_=6 , UpperCAmelCase_=17 , UpperCAmelCase_=25 , UpperCAmelCase_=4 , UpperCAmelCase_=4 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0006 , UpperCAmelCase_=5_12 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=1 , UpperCAmelCase_=True , UpperCAmelCase_=1 , UpperCAmelCase_=5_02_56 , UpperCAmelCase_=5_02_56 , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = action_weight
lowerCAmelCase = reward_weight
lowerCAmelCase = value_weight
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = block_size
lowerCAmelCase = action_dim
lowerCAmelCase = observation_dim
lowerCAmelCase = transition_dim
lowerCAmelCase = learning_rate
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_embd
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = resid_pdrop
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = kaiming_initializer_range
lowerCAmelCase = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
| 720
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 )
lowerCAmelCase = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
for example in examples:
lowerCAmelCase = video_classifier(UpperCAmelCase_ )
self.assertEqual(
UpperCAmelCase_ , [
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
] , )
@require_torch
def __snake_case ( self ):
lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
lowerCAmelCase = pipeline(
'''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 )
lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
lowerCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def __snake_case ( self ):
pass
| 33
| 0
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCAmelCase_ =logging.get_logger(__name__)
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 721
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def __snake_case ( self , UpperCAmelCase_=0 ):
lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) )
lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# warmup pass to apply optimizations
lowerCAmelCase = pipe(**self.get_dummy_inputs() )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __snake_case ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case ( self ):
lowerCAmelCase = ort.SessionOptions()
lowerCAmelCase = False
return options
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 33
| 0
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase ( _snake_case = 3 ):
if isinstance(_snake_case , _snake_case ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' )
lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' )
lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case )
lowerCAmelCase = number_of_qubits
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case , _snake_case )
# simulate with 10000 shots
lowerCAmelCase = Aer.get_backend('''qasm_simulator''' )
lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 )
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 700
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = args.pruning_method
lowerCAmelCase = args.threshold
lowerCAmelCase = args.model_name_or_path.rstrip('''/''' )
lowerCAmelCase = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
lowerCAmelCase = torch.load(os.path.join(_snake_case , '''pytorch_model.bin''' ) )
lowerCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
lowerCAmelCase = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
lowerCAmelCase = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase = TopKBinarizer.apply(_snake_case , _snake_case )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase , lowerCAmelCase = -0.1, 1.1
lowerCAmelCase = torch.sigmoid(_snake_case )
lowerCAmelCase = s * (r - l) + l
lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
lowerCAmelCase = os.path.join(
os.path.dirname(_snake_case ) , F"""bertarized_{os.path.basename(_snake_case )}""" )
if not os.path.isdir(_snake_case ):
shutil.copytree(_snake_case , _snake_case )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(_snake_case , os.path.join(_snake_case , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
UpperCAmelCase_ =argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
UpperCAmelCase_ =parser.parse_args()
main(args)
| 33
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ="""switch_transformers"""
__a : Union[str, Any] =["""past_key_values"""]
__a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_sparse_encoder_layers
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCAmelCase = num_heads
lowerCAmelCase = num_experts
lowerCAmelCase = expert_capacity
lowerCAmelCase = router_bias
lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
lowerCAmelCase = router_dtype
lowerCAmelCase = router_ignore_padding_tokens
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = add_router_probs
lowerCAmelCase = router_z_loss_coef
lowerCAmelCase = router_aux_loss_coef
lowerCAmelCase = self.feed_forward_proj.split('''-''' )
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == '''gated'''
if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = '''gelu_new'''
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 701
|
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
UpperCAmelCase_ ={
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
UpperCAmelCase_ ={
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = set()
lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase = char
lowerCAmelCase = set(_snake_case )
return pairs
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Union[str, Any] =VOCAB_FILES_NAMES
__a : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
__a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , **UpperCAmelCase_ , ):
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = merges_file
lowerCAmelCase = {}
lowerCAmelCase = 0
lowerCAmelCase = 1
lowerCAmelCase = 2
lowerCAmelCase = 3
self.add_from_file(UpperCAmelCase_ )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase = merges_handle.read().split('''\n''' )[:-1]
lowerCAmelCase = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self ):
return len(self.encoder )
def __snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self , UpperCAmelCase_ ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase = tuple(UpperCAmelCase_ )
lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase = bigram
lowerCAmelCase = []
lowerCAmelCase = 0
while i < len(UpperCAmelCase_ ):
try:
lowerCAmelCase = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase = tuple(UpperCAmelCase_ )
lowerCAmelCase = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
lowerCAmelCase = get_pairs(UpperCAmelCase_ )
lowerCAmelCase = '''@@ '''.join(UpperCAmelCase_ )
lowerCAmelCase = word[:-4]
lowerCAmelCase = word
return word
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = []
lowerCAmelCase = re.findall(r'''\S+\n?''' , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __snake_case ( self , UpperCAmelCase_ ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self , UpperCAmelCase_ ):
return self.decoder.get(UpperCAmelCase_ , self.unk_token )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = ''' '''.join(UpperCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.merges_file , UpperCAmelCase_ )
return out_vocab_file, out_merge_file
def __snake_case ( self , UpperCAmelCase_ ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
try:
with open(UpperCAmelCase_ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(UpperCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
lowerCAmelCase = f.readlines()
for lineTmp in lines:
lowerCAmelCase = lineTmp.strip()
lowerCAmelCase = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
lowerCAmelCase = line[:idx]
lowerCAmelCase = len(self.encoder )
| 33
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ ={
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702
|
from __future__ import annotations
from typing import Generic, TypeVar
UpperCAmelCase_ =TypeVar("""T""")
class __UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ ):
lowerCAmelCase = data
lowerCAmelCase = self
lowerCAmelCase = 0
class __UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
# map from node name to the node object
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ ):
# create a new set with x as its member
lowerCAmelCase = DisjointSetTreeNode(UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
# find the set x belongs to (with path-compression)
lowerCAmelCase = self.map[data]
if elem_ref != elem_ref.parent:
lowerCAmelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
# helper function for union operation
if nodea.rank > nodea.rank:
lowerCAmelCase = nodea
else:
lowerCAmelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
# merge 2 disjoint sets
self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) )
class __UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
# connections: map from the node to the neighbouring nodes (with weights)
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ ):
# add a node ONLY if its not present in the graph
if node not in self.connections:
lowerCAmelCase = {}
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
# add an edge with the given weight
self.add_node(UpperCAmelCase_ )
self.add_node(UpperCAmelCase_ )
lowerCAmelCase = weight
lowerCAmelCase = weight
def __snake_case ( self ):
lowerCAmelCase = []
lowerCAmelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda UpperCAmelCase_ : x[2] )
# creating the disjoint set
lowerCAmelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(UpperCAmelCase_ )
# MST generation
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edges[index]
index += 1
lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ )
lowerCAmelCase = disjoint_set.find_set(UpperCAmelCase_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ )
return graph
| 33
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ="""trocr"""
__a : List[Any] =["""past_key_values"""]
__a : Dict ={
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self , UpperCAmelCase_=5_02_65 , UpperCAmelCase_=10_24 , UpperCAmelCase_=12 , UpperCAmelCase_=16 , UpperCAmelCase_=40_96 , UpperCAmelCase_="gelu" , UpperCAmelCase_=5_12 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=0.0 , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=1 , UpperCAmelCase_=0 , UpperCAmelCase_=2 , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = activation_function
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = scale_embedding
lowerCAmelCase = use_learned_position_embeddings
lowerCAmelCase = layernorm_embedding
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 703
|
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
def count_of_possible_combinations(_snake_case ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_snake_case )
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
def count_of_possible_combinations_with_dp_array(
_snake_case , _snake_case ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCAmelCase = sum(
count_of_possible_combinations_with_dp_array(target - item , _snake_case )
for item in array )
lowerCAmelCase = answer
return answer
lowerCAmelCase = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_snake_case , _snake_case )
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
lowerCAmelCase = [0] * (target + 1)
lowerCAmelCase = 1
for i in range(1 , target + 1 ):
for j in range(_snake_case ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ =3
UpperCAmelCase_ =5
UpperCAmelCase_ =[1, 2, 5]
print(combination_sum_iv(n, array, target))
| 33
| 0
|
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = [1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0, 0, 0
lowerCAmelCase = ugly_nums[ia] * 2
lowerCAmelCase = ugly_nums[ia] * 3
lowerCAmelCase = ugly_nums[ia] * 5
for _ in range(1 , _snake_case ):
lowerCAmelCase = min(_snake_case , _snake_case , _snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
lowerCAmelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
lowerCAmelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
lowerCAmelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(200) = }''')
| 704
|
import torch
from diffusers import StableDiffusionPipeline
UpperCAmelCase_ ="""path-to-your-trained-model"""
UpperCAmelCase_ =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
UpperCAmelCase_ ="""A photo of sks dog in a bucket"""
UpperCAmelCase_ =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 33
| 0
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ =abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def UpperCAmelCase ( _snake_case ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def UpperCAmelCase ( _snake_case ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_snake_case )
def UpperCAmelCase ( _snake_case ):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(_snake_case , id=_snake_case )
def UpperCAmelCase ( _snake_case , _snake_case ):
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCAmelCase = 0
# Doctest custom flag to ignore output.
UpperCAmelCase_ =doctest.register_optionflag("""IGNORE_RESULT""")
UpperCAmelCase_ =doctest.OutputChecker
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase_ =CustomOutputChecker
UpperCAmelCase_ =HfDoctestModule
UpperCAmelCase_ =HfDocTestParser
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ ={
"""configuration_jukebox""": [
"""JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""JukeboxConfig""",
"""JukeboxPriorConfig""",
"""JukeboxVQVAEConfig""",
],
"""tokenization_jukebox""": ["""JukeboxTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""JukeboxModel""",
"""JukeboxPreTrainedModel""",
"""JukeboxVQVAE""",
"""JukeboxPrior""",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
| 0
|
def UpperCAmelCase ( _snake_case = 100 ):
lowerCAmelCase = set()
lowerCAmelCase = 0
lowerCAmelCase = n + 1 # maximum limit
for a in range(2 , _snake_case ):
for b in range(2 , _snake_case ):
lowerCAmelCase = a**b # calculates the current power
collect_powers.add(_snake_case ) # adds the result to the set
return len(_snake_case )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 706
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = 8
# DPR tok
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase = {'''unk_token''': '''<unk>'''}
lowerCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase_ ) )
def __snake_case ( self ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __snake_case ( self ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __snake_case ( self ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
lowerCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase = dataset
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = self.get_dummy_dataset()
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
lowerCAmelCase = os.path.join(self.tmpdirname , '''dataset''' )
lowerCAmelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , )
return retriever
def __snake_case ( self ):
lowerCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
lowerCAmelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
lowerCAmelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(UpperCAmelCase_ , open(UpperCAmelCase_ , '''wb''' ) )
lowerCAmelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
lowerCAmelCase = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
lowerCAmelCase = self.get_dummy_dataset()
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
def __snake_case ( self ):
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=UpperCAmelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , UpperCAmelCase_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __snake_case ( self ):
lowerCAmelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase = RagRetriever.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever.retrieve(UpperCAmelCase_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __snake_case ( self ):
import torch
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_canonical_hf_index_retriever()
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
lowerCAmelCase = retriever(
UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ , return_tensors='''pt''' , )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __snake_case ( self ):
lowerCAmelCase = self.get_dpr_ctx_encoder_tokenizer()
lowerCAmelCase = 1
lowerCAmelCase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase_ )
retriever.set_ctx_encoder_tokenizer(UpperCAmelCase_ )
lowerCAmelCase = [[5, 7], [10, 11]]
lowerCAmelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
lowerCAmelCase = retriever(UpperCAmelCase_ , UpperCAmelCase_ , prefix=retriever.config.generator.prefix , n_docs=UpperCAmelCase_ )
self.assertEqual(
len(UpperCAmelCase_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , UpperCAmelCase_ ) # check for doc token related keys in dictionary.
| 33
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
UpperCAmelCase_ =getLogger(__name__)
UpperCAmelCase_ ="""cuda""" if torch.cuda.is_available() else """cpu"""
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case = 8 , _snake_case = DEFAULT_DEVICE , _snake_case=False , _snake_case="summarization" , _snake_case=None , **_snake_case , ):
lowerCAmelCase = Path(_snake_case ).open('''w''' , encoding='''utf-8''' )
lowerCAmelCase = str(_snake_case )
lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).to(_snake_case )
if fpaa:
lowerCAmelCase = model.half()
lowerCAmelCase = AutoTokenizer.from_pretrained(_snake_case )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_snake_case , _snake_case )
if prefix is None:
lowerCAmelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(_snake_case , _snake_case ) ) ):
lowerCAmelCase = [prefix + text for text in examples_chunk]
lowerCAmelCase = tokenizer(_snake_case , return_tensors='''pt''' , truncation=_snake_case , padding='''longest''' ).to(_snake_case )
lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_snake_case , )
lowerCAmelCase = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
lowerCAmelCase = int(time.time() - start_time ) # seconds
lowerCAmelCase = len(_snake_case )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def UpperCAmelCase ( ):
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def UpperCAmelCase ( _snake_case=True ):
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=_snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=_snake_case , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=_snake_case , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=_snake_case , required=_snake_case , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=_snake_case , required=_snake_case , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=_snake_case , required=_snake_case , default=_snake_case , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=_snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=_snake_case , default=8 , required=_snake_case , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=_snake_case , default=-1 , required=_snake_case , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=_snake_case , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
lowerCAmelCase , lowerCAmelCase = parser.parse_known_args()
lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_snake_case )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
lowerCAmelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_snake_case )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
lowerCAmelCase = generate_summaries_or_translations(
_snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_snake_case , )
if args.reference_path is None:
return {}
# Compute scores
lowerCAmelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_snake_case )]
lowerCAmelCase = score_fn(_snake_case , _snake_case )
scores.update(_snake_case )
if args.dump_args:
scores.update(_snake_case )
if args.info:
lowerCAmelCase = args.info
if verbose:
print(_snake_case )
if args.score_path is not None:
json.dump(_snake_case , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 707
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ="""switch_transformers"""
__a : Union[str, Any] =["""past_key_values"""]
__a : Dict ={"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , UpperCAmelCase_=3_21_28 , UpperCAmelCase_=7_68 , UpperCAmelCase_=64 , UpperCAmelCase_=20_48 , UpperCAmelCase_=64 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=3 , UpperCAmelCase_=12 , UpperCAmelCase_=8 , UpperCAmelCase_=False , UpperCAmelCase_=0.01 , UpperCAmelCase_="float32" , UpperCAmelCase_=False , UpperCAmelCase_=32 , UpperCAmelCase_=1_28 , UpperCAmelCase_=0.1 , UpperCAmelCase_=1E-6 , UpperCAmelCase_=0.001 , UpperCAmelCase_=0.001 , UpperCAmelCase_=1.0 , UpperCAmelCase_="relu" , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=0 , UpperCAmelCase_=1 , **UpperCAmelCase_ , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = d_kv
lowerCAmelCase = d_ff
lowerCAmelCase = num_sparse_encoder_layers
lowerCAmelCase = num_layers
lowerCAmelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
lowerCAmelCase = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCAmelCase = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
lowerCAmelCase = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCAmelCase = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCAmelCase = num_heads
lowerCAmelCase = num_experts
lowerCAmelCase = expert_capacity
lowerCAmelCase = router_bias
lowerCAmelCase = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
lowerCAmelCase = router_dtype
lowerCAmelCase = router_ignore_padding_tokens
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = relative_attention_max_distance
lowerCAmelCase = dropout_rate
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_factor
lowerCAmelCase = feed_forward_proj
lowerCAmelCase = use_cache
lowerCAmelCase = add_router_probs
lowerCAmelCase = router_z_loss_coef
lowerCAmelCase = router_aux_loss_coef
lowerCAmelCase = self.feed_forward_proj.split('''-''' )
lowerCAmelCase = act_info[-1]
lowerCAmelCase = act_info[0] == '''gated'''
if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCAmelCase = '''gelu_new'''
super().__init__(
pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 33
| 0
|
import datasets
from .evaluate import evaluate
UpperCAmelCase_ ="""\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
UpperCAmelCase_ ="""
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
UpperCAmelCase_ ="""
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def __snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCAmelCase = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCAmelCase = evaluate(dataset=UpperCAmelCase_ , predictions=UpperCAmelCase_ )
return score
| 708
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase , lowerCAmelCase = analyze_text(_snake_case )
lowerCAmelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCAmelCase = sum(single_char_strings.values() )
# one length string
lowerCAmelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
lowerCAmelCase = single_char_strings[ch]
lowerCAmelCase = my_str / all_sum
my_fir_sum += prob * math.loga(_snake_case ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
lowerCAmelCase = sum(two_char_strings.values() )
lowerCAmelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
lowerCAmelCase = cha + cha
if sequence in two_char_strings:
lowerCAmelCase = two_char_strings[sequence]
lowerCAmelCase = int(_snake_case ) / all_sum
my_sec_sum += prob * math.loga(_snake_case )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = Counter() # type: ignore
lowerCAmelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_snake_case ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def UpperCAmelCase ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 33
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ =logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : List[str] =["""pixel_values"""]
def __init__( self , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = PILImageResampling.BICUBIC , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = 1 / 2_55 , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , **UpperCAmelCase_ , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_24}
lowerCAmelCase = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
lowerCAmelCase = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ , param_name='''crop_size''' )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase = do_convert_rgb
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = PILImageResampling.BICUBIC , UpperCAmelCase_ = None , **UpperCAmelCase_ , ):
lowerCAmelCase = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase = get_resize_output_image_size(UpperCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase_ )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , **UpperCAmelCase_ , ):
lowerCAmelCase = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(UpperCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , **UpperCAmelCase_ , ):
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , **UpperCAmelCase_ , ):
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = ChannelDimension.FIRST , **UpperCAmelCase_ , ):
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(UpperCAmelCase_ , param_name='''size''' , default_to_square=UpperCAmelCase_ )
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase = get_size_dict(UpperCAmelCase_ , param_name='''crop_size''' , default_to_square=UpperCAmelCase_ )
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase = [convert_to_rgb(UpperCAmelCase_ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_center_crop:
lowerCAmelCase = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_ ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
lowerCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Tuple =IFInpaintingSuperResolutionPipeline
__a : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__a : int =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
__a : Union[str, Any] =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __snake_case ( self ):
return self._get_superresolution_dummy_components()
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
if str(UpperCAmelCase_ ).startswith('''mps''' ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __snake_case ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __snake_case ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __snake_case ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __snake_case ( self ):
self._test_save_load_local()
def __snake_case ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 33
| 0
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ =logging.get_logger(__name__)
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 710
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ ={
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase_ ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ =importlib.util.spec_from_file_location(
"""transformers""",
os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCAmelCase_ =spec.loader.load_module()
UpperCAmelCase_ =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase_ =re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
UpperCAmelCase_ ={
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def UpperCAmelCase ( ):
lowerCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
lowerCAmelCase = False
# source code of `config_class`
lowerCAmelCase = inspect.getsource(_snake_case )
lowerCAmelCase = _re_checkpoint.findall(_snake_case )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowerCAmelCase , lowerCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase = True
break
lowerCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_snake_case )
if len(_snake_case ) > 0:
lowerCAmelCase = '''\n'''.join(sorted(_snake_case ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 711
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
UpperCAmelCase_ =datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
__a : Optional[datasets.Features] =None
__a : str ="utf-8"
__a : Optional[str] =None
__a : Optional[str] =None
__a : bool =True # deprecated
__a : Optional[int] =None # deprecated
__a : int =1_0 << 2_0 # 10MB
__a : Optional[bool] =None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
__a : str =JsonConfig
def __snake_case ( self ):
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowerCAmelCase = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __snake_case ( self , UpperCAmelCase_ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCAmelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
lowerCAmelCase = data_files
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowerCAmelCase = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = [files]
lowerCAmelCase = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def __snake_case ( self , UpperCAmelCase_ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCAmelCase = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
lowerCAmelCase = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCAmelCase = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
return pa_table
def __snake_case ( self , UpperCAmelCase_ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
# We keep only the field we are interested in
lowerCAmelCase = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase_ , (list, tuple) ):
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
else:
lowerCAmelCase = dataset
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
yield file_idx, self._cast_table(UpperCAmelCase_ )
# If the file has one json object per line
else:
with open(UpperCAmelCase_ , '''rb''' ) as f:
lowerCAmelCase = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCAmelCase = max(self.config.chunksize // 32 , 16 << 10 )
lowerCAmelCase = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
lowerCAmelCase = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCAmelCase = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode('''utf-8''' )
try:
while True:
try:
lowerCAmelCase = paj.read_json(
io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase_ )
or block_size > len(UpperCAmelCase_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCAmelCase = json.load(UpperCAmelCase_ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON
try:
lowerCAmelCase = set().union(*[row.keys() for row in dataset] )
lowerCAmelCase = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
lowerCAmelCase = pa.Table.from_pydict(UpperCAmelCase_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(UpperCAmelCase_ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
batch_idx += 1
| 33
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ ={
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ =[
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
UpperCAmelCase_ =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 712
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase_ =logging.get_logger(__name__)
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__a : Optional[Any] ="""maskformer-swin"""
__a : Optional[int] ={
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , UpperCAmelCase_=2_24 , UpperCAmelCase_=4 , UpperCAmelCase_=3 , UpperCAmelCase_=96 , UpperCAmelCase_=[2, 2, 6, 2] , UpperCAmelCase_=[3, 6, 12, 24] , UpperCAmelCase_=7 , UpperCAmelCase_=4.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(UpperCAmelCase_ )
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 33
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Union[str, Any] =StableDiffusionPanoramaPipeline
__a : Union[str, Any] =TEXT_TO_IMAGE_PARAMS
__a : Union[str, Any] =TEXT_TO_IMAGE_BATCH_PARAMS
__a : List[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
__a : List[str] =TEXT_TO_IMAGE_IMAGE_PARAMS
def __snake_case ( self ):
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase = DDIMScheduler()
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowerCAmelCase = CLIPTextModel(UpperCAmelCase_ )
lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self ):
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ )
lowerCAmelCase = sd_pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )
def __snake_case ( self ):
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ )
lowerCAmelCase = '''french fries'''
lowerCAmelCase = sd_pipe(**UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self ):
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ )
lowerCAmelCase = sd_pipe(**UpperCAmelCase_ , view_batch_size=2 )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self ):
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ )
lowerCAmelCase = sd_pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __snake_case ( self ):
lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=UpperCAmelCase_ )
lowerCAmelCase = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase_ )
lowerCAmelCase = sd_pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self , UpperCAmelCase_=0 ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self ):
lowerCAmelCase = '''stabilityai/stable-diffusion-2-base'''
lowerCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder='''scheduler''' )
lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCAmelCase = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def __snake_case ( self ):
lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=UpperCAmelCase_ )
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCAmelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __snake_case ( self ):
lowerCAmelCase = 0
def callback_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> None:
lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowerCAmelCase = False
lowerCAmelCase = '''stabilityai/stable-diffusion-2-base'''
lowerCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder='''scheduler''' )
lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
lowerCAmelCase = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
lowerCAmelCase = self.get_inputs()
pipe(**UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase = '''stabilityai/stable-diffusion-2-base'''
lowerCAmelCase = DDIMScheduler.from_pretrained(UpperCAmelCase_ , subfolder='''scheduler''' )
lowerCAmelCase = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
lowerCAmelCase = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase = self.get_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ )
lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 713
|
from collections.abc import Sequence
def UpperCAmelCase ( _snake_case , _snake_case = False ):
if not arr:
return 0
lowerCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' )
lowerCAmelCase = 0.0
for num in arr:
lowerCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num )
lowerCAmelCase = max(_snake_case , _snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase_ =[-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 33
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''ZinengTang/tvlt-base'''
lowerCAmelCase = tempfile.mkdtemp()
def __snake_case ( self , **UpperCAmelCase_ ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def __snake_case ( self , **UpperCAmelCase_ ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase_ )
self.assertIsInstance(processor.image_processor , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
lowerCAmelCase = np.ones([1_20_00] )
lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='''np''' )
lowerCAmelCase = processor(audio=UpperCAmelCase_ , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __snake_case ( self ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
lowerCAmelCase = np.ones([3, 2_24, 2_24] )
lowerCAmelCase = image_processor(UpperCAmelCase_ , return_tensors='''np''' )
lowerCAmelCase = processor(images=UpperCAmelCase_ , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __snake_case ( self ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
lowerCAmelCase = np.ones([1_20_00] )
lowerCAmelCase = np.ones([3, 2_24, 2_24] )
lowerCAmelCase = processor(audio=UpperCAmelCase_ , images=UpperCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase_ ):
processor()
def __snake_case ( self ):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 714
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any =BertJapaneseTokenizer
__a : Optional[int] =False
__a : int =True
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowerCAmelCase = tokenizer.subword_tokenizer
lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Union[str, Any] =BertJapaneseTokenizer
__a : Optional[int] =False
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , **UpperCAmelCase_ ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowerCAmelCase = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 33
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCAmelCase_ ={
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ ={
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Tuple =VOCAB_FILES_NAMES
__a : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
__a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[int] =["""input_ids""", """attention_mask"""]
__a : Tuple =BartTokenizer
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="replace" , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , UpperCAmelCase_=False , UpperCAmelCase_=True , **UpperCAmelCase_ , ):
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase_ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase_ )
lowerCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase = '''post_processor'''
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['''sep'''] )
if "cls" in state:
lowerCAmelCase = tuple(state['''cls'''] )
lowerCAmelCase = False
if state.get('''add_prefix_space''' , UpperCAmelCase_ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('''trim_offsets''' , UpperCAmelCase_ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase_ , state.pop('''type''' ) )
lowerCAmelCase = component_class(**UpperCAmelCase_ )
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
@property
def __snake_case ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value
lowerCAmelCase = value
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
lowerCAmelCase = kwargs.get('''is_split_into_words''' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
lowerCAmelCase = kwargs.get('''is_split_into_words''' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=None ):
lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 715
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCAmelCase_ ="""hf-internal-testing/tiny-random-bert"""
UpperCAmelCase_ =os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
UpperCAmelCase_ ="""9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCAmelCase_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
self.assertTrue(os.path.isfile(UpperCAmelCase_ ) )
# File is cached at the same place the second time.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Using a specific revision to test the full commit hash.
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''9b8c223''' )
self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''snapshots''' , UpperCAmelCase_ , UpperCAmelCase_ ) )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
lowerCAmelCase = cached_file('''tiny-random-bert''' , UpperCAmelCase_ )
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision='''aaaa''' )
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
def __snake_case ( self ):
with self.assertRaisesRegex(UpperCAmelCase_ , '''does not appear to have a file named''' ):
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' )
with open(os.path.join(UpperCAmelCase_ , '''refs''' , '''main''' ) ) as f:
lowerCAmelCase = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , '''.no_exist''' , UpperCAmelCase_ , '''conf''' ) ) )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
lowerCAmelCase = mock.Mock()
lowerCAmelCase = 5_00
lowerCAmelCase = {}
lowerCAmelCase = HTTPError
lowerCAmelCase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=UpperCAmelCase_ ) as mock_head:
lowerCAmelCase = cached_file(UpperCAmelCase_ , '''conf''' , _raise_exceptions_for_connection_errors=UpperCAmelCase_ )
self.assertIsNone(UpperCAmelCase_ )
# This check we did call the fake head request
mock_head.assert_called()
def __snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCAmelCase_ ) )
def __snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , UpperCAmelCase_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCAmelCase_ , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ , revision='''ahaha''' )
lowerCAmelCase = get_file_from_repo('''bert-base-cased''' , UpperCAmelCase_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowerCAmelCase = json.loads(open(UpperCAmelCase_ , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_68 )
def __snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = Path(UpperCAmelCase_ ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase_ , '''a.txt''' ) , str(UpperCAmelCase_ ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , '''b.txt''' ) )
| 33
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : List[str] =["""image_processor""", """tokenizer"""]
__a : Optional[Any] ="""BridgeTowerImageProcessor"""
__a : Optional[Any] =("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ):
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ):
lowerCAmelCase = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
# add pixel_values + pixel_mask
lowerCAmelCase = self.image_processor(
UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , **UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer.model_input_names
lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 716
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = "arrow" , **UpperCAmelCase_ , ):
super().__init__(
split=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ , streaming=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase = load_from_cache_file
lowerCAmelCase = file_format
lowerCAmelCase = Spark(
df=UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , working_dir=UpperCAmelCase_ , **UpperCAmelCase_ , )
def __snake_case ( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 33
| 0
|
UpperCAmelCase_ =[sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
UpperCAmelCase_ =[None] * 1000_0000
UpperCAmelCase_ =True
UpperCAmelCase_ =False
def UpperCAmelCase ( _snake_case ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCAmelCase = chain(next_number(_snake_case ) )
lowerCAmelCase = number_chain
while number < 10000000:
lowerCAmelCase = number_chain
number *= 10
return number_chain
def UpperCAmelCase ( _snake_case = 10000000 ):
for i in range(1 , _snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 717
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase ( _snake_case = 3 ):
if isinstance(_snake_case , _snake_case ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
lowerCAmelCase = QuantumRegister(_snake_case , '''qr''' )
lowerCAmelCase = ClassicalRegister(_snake_case , '''cr''' )
lowerCAmelCase = QuantumCircuit(_snake_case , _snake_case )
lowerCAmelCase = number_of_qubits
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _snake_case , _snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_snake_case , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case , _snake_case )
# simulate with 10000 shots
lowerCAmelCase = Aer.get_backend('''qasm_simulator''' )
lowerCAmelCase = execute(_snake_case , _snake_case , shots=10000 )
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 33
| 0
|
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
UpperCAmelCase_ =input("""Enter image url: """).strip()
print(F'''Downloading image from {url} ...''')
UpperCAmelCase_ =BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
UpperCAmelCase_ =soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
UpperCAmelCase_ =requests.get(image_url).content
UpperCAmelCase_ =F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 718
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__a : Any =1
@register_to_config
def __init__( self , UpperCAmelCase_=20_00 , UpperCAmelCase_=0.1 , UpperCAmelCase_=20 , UpperCAmelCase_=1E-3 ):
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ):
lowerCAmelCase = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase_ , device=UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
lowerCAmelCase = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
lowerCAmelCase = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
lowerCAmelCase = std.flatten()
while len(std.shape ) < len(score.shape ):
lowerCAmelCase = std.unsqueeze(-1 )
lowerCAmelCase = -score / std
# compute
lowerCAmelCase = -1.0 / len(self.timesteps )
lowerCAmelCase = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
lowerCAmelCase = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
lowerCAmelCase = beta_t.unsqueeze(-1 )
lowerCAmelCase = -0.5 * beta_t * x
lowerCAmelCase = torch.sqrt(UpperCAmelCase_ )
lowerCAmelCase = drift - diffusion**2 * score
lowerCAmelCase = x + drift * dt
# add noise
lowerCAmelCase = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase_ , device=x.device , dtype=x.dtype )
lowerCAmelCase = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
| 33
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCAmelCase_ =subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
UpperCAmelCase_ =(
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
UpperCAmelCase_ ="""|""".join(sys.argv[1:])
UpperCAmelCase_ =re.compile(RF'''^({joined_dirs}).*?\.py$''')
UpperCAmelCase_ =[x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 719
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __UpperCamelCase ( yaml.SafeLoader ):
'''simple docstring'''
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCAmelCase = [tuple(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else key for key in keys]
lowerCAmelCase = Counter(UpperCAmelCase_ )
lowerCAmelCase = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=False ):
lowerCAmelCase = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ )
self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ )
return mapping
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowerCAmelCase = full_content[1:].index('''---''' ) + 1
lowerCAmelCase = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_snake_case )
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__a : Any ={"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def __snake_case ( cls , UpperCAmelCase_ ):
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file:
lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(UpperCAmelCase_ )
else:
return cls()
def __snake_case ( self , UpperCAmelCase_ ):
if path.exists():
with open(UpperCAmelCase_ , encoding='''utf-8''' ) as readme_file:
lowerCAmelCase = readme_file.read()
else:
lowerCAmelCase = None
lowerCAmelCase = self._to_readme(UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ = None ):
if readme_content is not None:
lowerCAmelCase , lowerCAmelCase = _split_yaml_from_readme(UpperCAmelCase_ )
lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
lowerCAmelCase = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def __snake_case ( cls , UpperCAmelCase_ ):
lowerCAmelCase = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCAmelCase = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**UpperCAmelCase_ )
def __snake_case ( self ):
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding='''utf-8''' , ).decode('''utf-8''' )
UpperCAmelCase_ ={
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCAmelCase_ =ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
UpperCAmelCase_ =ap.parse_args()
UpperCAmelCase_ =Path(args.readme_filepath)
UpperCAmelCase_ =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 33
| 0
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
UpperCAmelCase_ ="""
import os
"""
UpperCAmelCase_ ="""
def foo():
import os
return False
"""
UpperCAmelCase_ ="""
def foo():
def bar():
if True:
import os
return False
return bar()
"""
UpperCAmelCase_ ="""
import os
try:
import bar
except ImportError:
raise ValueError()
"""
UpperCAmelCase_ ="""
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
UpperCAmelCase_ ="""
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
UpperCAmelCase_ ="""
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
UpperCAmelCase_ ="""
import os
try:
import bar
except:
raise ValueError()
"""
UpperCAmelCase_ ="""
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
UpperCAmelCase_ ="""
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
UpperCAmelCase_ =[
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , _snake_case )
def UpperCAmelCase ( _snake_case , _snake_case ):
lowerCAmelCase = os.path.join(_snake_case , '''test_file.py''' )
with open(_snake_case , '''w''' ) as _tmp_file:
_tmp_file.write(_snake_case )
lowerCAmelCase = get_imports(_snake_case )
assert parsed_imports == ["os"]
| 720
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__a : str =MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = VideoClassificationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ , top_k=2 )
lowerCAmelCase = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def __snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ ):
for example in examples:
lowerCAmelCase = video_classifier(UpperCAmelCase_ )
self.assertEqual(
UpperCAmelCase_ , [
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
{'''score''': ANY(UpperCAmelCase_ ), '''label''': ANY(UpperCAmelCase_ )},
] , )
@require_torch
def __snake_case ( self ):
lowerCAmelCase = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
lowerCAmelCase = pipeline(
'''video-classification''' , model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , frame_sampling_rate=4 )
lowerCAmelCase = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase = video_classifier(UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , )
lowerCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def __snake_case ( self ):
pass
| 33
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase_ =logging.get_logger(__name__)
UpperCAmelCase_ ={
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
UpperCAmelCase_ =[
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def UpperCAmelCase ( _snake_case ):
lowerCAmelCase = {}
with open(_snake_case , '''r''' ) as file:
for line_number, line in enumerate(_snake_case ):
lowerCAmelCase = line.strip()
if line:
lowerCAmelCase = line.split()
lowerCAmelCase = line_number
lowerCAmelCase = words[0]
lowerCAmelCase = value
return result
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
for attribute in key.split('''.''' ):
lowerCAmelCase = getattr(_snake_case , _snake_case )
lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
lowerCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
lowerCAmelCase = '''param'''
if weight_type is not None and weight_type != "param":
lowerCAmelCase = getattr(_snake_case , _snake_case ).shape
elif weight_type is not None and weight_type == "param":
lowerCAmelCase = hf_pointer
for attribute in hf_param_name.split('''.''' ):
lowerCAmelCase = getattr(_snake_case , _snake_case )
lowerCAmelCase = shape_pointer.shape
# let's reduce dimension
lowerCAmelCase = value[0]
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
lowerCAmelCase = getattr(_snake_case , _snake_case )
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
lowerCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
lowerCAmelCase = '''param'''
if weight_type is not None and weight_type != "param":
lowerCAmelCase = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCAmelCase = '''.'''.join([key, hf_param_name] )
else:
lowerCAmelCase = key
lowerCAmelCase = value if '''lm_head''' in full_key else value[0]
UpperCAmelCase_ ={
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case=None , _snake_case=None ):
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
lowerCAmelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(_snake_case )[0].split('''.''' )[-2]
lowerCAmelCase = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
lowerCAmelCase = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase = '''weight_v'''
elif "bias" in name:
lowerCAmelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase = '''weight'''
else:
lowerCAmelCase = None
if hf_dict is not None:
rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return is_used
return is_used
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case ):
lowerCAmelCase = []
lowerCAmelCase = fairseq_model.state_dict()
lowerCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
lowerCAmelCase = True
else:
lowerCAmelCase = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
if not is_used:
unused_weights.append(_snake_case )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
lowerCAmelCase = full_name.split('''conv_layers.''' )[-1]
lowerCAmelCase = name.split('''.''' )
lowerCAmelCase = int(items[0] )
lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def UpperCAmelCase ( _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case=False ):
if config_path is not None:
lowerCAmelCase = WavaVecaConfig.from_pretrained(_snake_case )
else:
lowerCAmelCase = WavaVecaConfig()
if is_seq_class:
lowerCAmelCase = read_txt_into_dict(_snake_case )
lowerCAmelCase = idalabel
lowerCAmelCase = WavaVecaForSequenceClassification(_snake_case )
lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
feature_extractor.save_pretrained(_snake_case )
elif is_finetuned:
if dict_path:
lowerCAmelCase = Dictionary.load(_snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase = target_dict.pad_index
lowerCAmelCase = target_dict.bos_index
lowerCAmelCase = target_dict.eos_index
lowerCAmelCase = len(target_dict.symbols )
lowerCAmelCase = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase = 0
lowerCAmelCase = 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_snake_case , _snake_case )
lowerCAmelCase = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
lowerCAmelCase = True if config.feat_extract_norm == '''layer''' else False
lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
lowerCAmelCase = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
lowerCAmelCase = WavaVecaForCTC(_snake_case )
else:
lowerCAmelCase = WavaVecaForPreTraining(_snake_case )
if is_finetuned or is_seq_class:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowerCAmelCase = argparse.Namespace(task='''audio_pretraining''' )
lowerCAmelCase = fairseq.tasks.setup_task(_snake_case )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
lowerCAmelCase = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase_ =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
UpperCAmelCase_ =parser.parse_args()
UpperCAmelCase_ =not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 721
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__a : Any ="""hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def __snake_case ( self , UpperCAmelCase_=0 ):
lowerCAmelCase = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(UpperCAmelCase_ ) )
lowerCAmelCase = np.random.RandomState(UpperCAmelCase_ )
lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
# warmup pass to apply optimizations
lowerCAmelCase = pipe(**self.get_dummy_inputs() )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __snake_case ( self ):
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**UpperCAmelCase_ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __snake_case ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case ( self ):
lowerCAmelCase = ort.SessionOptions()
lowerCAmelCase = False
return options
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
# using the PNDM scheduler by default
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __snake_case ( self ):
lowerCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase = init_image.resize((7_68, 5_12) )
lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
lowerCAmelCase = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type='''np''' , )
lowerCAmelCase = output.images
lowerCAmelCase = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 7_68, 3)
lowerCAmelCase = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 33
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : Tuple = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
| 34
| 1
|
import operator
def A ( _lowercase , _lowercase = False , _lowercase = None ):
SCREAMING_SNAKE_CASE : int = operator.lt if reverse else operator.gt
SCREAMING_SNAKE_CASE : Tuple = solution or []
if not arr:
return solution
SCREAMING_SNAKE_CASE : Union[str, Any] = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase , sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
SCREAMING_SNAKE_CASE : Tuple = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase , _lowercase ):
solution.insert(_lowercase , _lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase , _lowercase , _lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
| 34
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Any = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
__UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
__UpperCamelCase : int = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = SqueezeBertTokenizer
def __init__( self : str , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : List[str]="[UNK]" , UpperCamelCase__ : int="[SEP]" , UpperCamelCase__ : Any="[PAD]" , UpperCamelCase__ : Union[str, Any]="[CLS]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : Any=True , UpperCamelCase__ : int=None , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(UpperCamelCase__ , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Tuple = do_lower_case
SCREAMING_SNAKE_CASE : Dict = strip_accents
SCREAMING_SNAKE_CASE : str = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = do_lower_case
def __A ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
| 34
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 34
| 1
|
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : str = [0] * len(_lowercase )
for i in range(1 , len(_lowercase ) ):
# use last results for better performance - dynamic programming
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
SCREAMING_SNAKE_CASE : List[Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
SCREAMING_SNAKE_CASE : int = j
return prefix_result
def A ( _lowercase ):
return max(prefix_function(_lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 34
| 1
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
__UpperCamelCase : Union[str, Any] = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__UpperCamelCase : str = 0
__UpperCamelCase : Tuple = 0xe000
__UpperCamelCase : Any = 0xe001
__UpperCamelCase : List[str] = 0xe002
__UpperCamelCase : Union[str, Any] = 0xe003
__UpperCamelCase : Tuple = 0xe004
# Maps special codepoints to human-readable names.
__UpperCamelCase : Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
__UpperCamelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Tuple , UpperCamelCase__ : List[Any]=chr(UpperCamelCase__ ) , UpperCamelCase__ : Dict=chr(UpperCamelCase__ ) , UpperCamelCase__ : str=chr(UpperCamelCase__ ) , UpperCamelCase__ : List[Any]=chr(UpperCamelCase__ ) , UpperCamelCase__ : List[Any]=chr(UpperCamelCase__ ) , UpperCamelCase__ : List[Any]=chr(UpperCamelCase__ ) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Dict=2048 , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
SCREAMING_SNAKE_CASE : int = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
SCREAMING_SNAKE_CASE : Union[str, Any] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
SCREAMING_SNAKE_CASE : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : Any = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , model_max_length=UpperCamelCase__ , **UpperCamelCase__ , )
# Creates a mapping for looking up the IDs of special symbols.
SCREAMING_SNAKE_CASE : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
SCREAMING_SNAKE_CASE : Optional[Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
SCREAMING_SNAKE_CASE : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
SCREAMING_SNAKE_CASE : Any = UNICODE_VOCAB_SIZE
SCREAMING_SNAKE_CASE : List[Any] = len(self._special_codepoints )
@property
def __A ( self : Dict ):
'''simple docstring'''
return self._unicode_vocab_size
def __A ( self : Tuple , UpperCamelCase__ : str ):
'''simple docstring'''
return list(UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : str ):
'''simple docstring'''
try:
return ord(UpperCamelCase__ )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def __A ( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase__ )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def __A ( self : List[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
return "".join(UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Tuple = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __A ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = [1] + ([0] * len(UpperCamelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase__ )) + [1]
return result
def __A ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Any = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
return ()
| 34
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 1
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase__ ( unittest.TestCase):
def __init__( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Tuple=99 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Tuple=37 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Tuple=4 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Any = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : str = use_attention_mask
SCREAMING_SNAKE_CASE : List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = num_choices
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCamelCase__ , )
return config, input_ids, attention_mask
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDistilBertModelTester(self )
@slow
def __A ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class_name.from_pretrained('''distilbert-base-uncased''' )
SCREAMING_SNAKE_CASE : Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class lowercase__ ( unittest.TestCase):
@slow
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
SCREAMING_SNAKE_CASE : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Any = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE : Tuple = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Tuple = (1, 11, 768)
self.assertEqual(output.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 ) )
| 34
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 1
|
from __future__ import annotations
def A ( _lowercase ):
return len(set(_lowercase ) ) == len(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Any = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
| 34
| 1
|
from manim import *
class lowercase__ ( UpperCamelCase_):
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Optional[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Any = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
SCREAMING_SNAKE_CASE : Tuple = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
SCREAMING_SNAKE_CASE : Any = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
SCREAMING_SNAKE_CASE : Any = Text('''CPU''' , font_size=24 )
SCREAMING_SNAKE_CASE : List[str] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE : Tuple = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
SCREAMING_SNAKE_CASE : str = Text('''GPU''' , font_size=24 )
SCREAMING_SNAKE_CASE : List[Any] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : str = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
SCREAMING_SNAKE_CASE : Any = Text('''Model''' , font_size=24 )
SCREAMING_SNAKE_CASE : Dict = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = []
for i, rect in enumerate(UpperCamelCase__ ):
rect.set_stroke(UpperCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
SCREAMING_SNAKE_CASE : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCamelCase__ , buff=0.0 )
self.add(UpperCamelCase__ )
cpu_targs.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE : Optional[Any] = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Text('''Loaded Checkpoint''' , font_size=24 )
SCREAMING_SNAKE_CASE : Union[str, Any] = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , aligned_edge=UpperCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
SCREAMING_SNAKE_CASE : List[str] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
self.play(Write(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) )
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : int = []
for i, rect in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = fill.copy().set_fill(UpperCamelCase__ , opacity=0.7 )
target.move_to(UpperCamelCase__ )
first_animations.append(GrowFromCenter(UpperCamelCase__ , run_time=1 ) )
SCREAMING_SNAKE_CASE : str = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(*UpperCamelCase__ )
self.wait()
| 34
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 34
| 1
|
def A ( _lowercase = 600_851_475_143 ):
try:
SCREAMING_SNAKE_CASE : List[Any] = int(_lowercase )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : int = 2
while i * i <= n:
while n % i == 0:
SCREAMING_SNAKE_CASE : int = i
n //= i
i += 1
if n > 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = n
return int(_lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 34
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 34
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """openai/whisper-base"""
UpperCamelCase_ = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCamelCase_ = """transcriber"""
UpperCamelCase_ = WhisperProcessor
UpperCamelCase_ = WhisperForConditionalGeneration
UpperCamelCase_ = ["""audio"""]
UpperCamelCase_ = ["""text"""]
def __A ( self : Optional[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return self.pre_processor(UpperCamelCase__ , return_tensors='''pt''' ).input_features
def __A ( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
return self.model.generate(inputs=UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
return self.pre_processor.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )[0]
| 34
|
import random
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def A ( _lowercase , _lowercase ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowercase ) or index < 0:
return None
SCREAMING_SNAKE_CASE : Dict = items[random.randint(0 , len(_lowercase ) - 1 )]
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _partition(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase , _lowercase )
# must be in larger
else:
return quick_select(_lowercase , index - (m + count) )
| 34
| 1
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__UpperCamelCase : List[Any] = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def A ( _lowercase , _lowercase , _lowercase , _lowercase=None ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE : Optional[int] = XLNetConfig.from_json_file(_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
SCREAMING_SNAKE_CASE : int = finetuning_task
SCREAMING_SNAKE_CASE : Dict = GLUE_TASKS_NUM_LABELS[finetuning_task]
SCREAMING_SNAKE_CASE : Union[str, Any] = XLNetForSequenceClassification(_lowercase )
elif "squad" in finetuning_task:
SCREAMING_SNAKE_CASE : int = finetuning_task
SCREAMING_SNAKE_CASE : Union[str, Any] = XLNetForQuestionAnswering(_lowercase )
else:
SCREAMING_SNAKE_CASE : Dict = XLNetLMHeadModel(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowercase , _lowercase , _lowercase )
# Save pytorch-model
SCREAMING_SNAKE_CASE : str = os.path.join(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_lowercase , _lowercase )
print(f"""Save PyTorch model to {os.path.abspath(_lowercase )}""" )
torch.save(model.state_dict() , _lowercase )
print(f"""Save configuration file to {os.path.abspath(_lowercase )}""" )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
__UpperCamelCase : List[Any] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 34
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 1
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = []
for part_id in partition_order:
SCREAMING_SNAKE_CASE : Optional[Any] = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(_lowercase ):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
SCREAMING_SNAKE_CASE : int = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : Tuple = spark.range(100 ).repartition(1 )
SCREAMING_SNAKE_CASE : Optional[int] = Spark(_lowercase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
SCREAMING_SNAKE_CASE : Dict = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : List[Any] = spark.range(10 ).repartition(2 )
SCREAMING_SNAKE_CASE : Optional[int] = [1, 0]
SCREAMING_SNAKE_CASE : int = _generate_iterable_examples(_lowercase , _lowercase ) # Reverse the partitions.
SCREAMING_SNAKE_CASE : str = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , _lowercase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : Optional[int] = spark.range(10 ).repartition(1 )
SCREAMING_SNAKE_CASE : List[Any] = SparkExamplesIterable(_lowercase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowercase ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
SCREAMING_SNAKE_CASE : Tuple = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : Dict = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
SCREAMING_SNAKE_CASE : Tuple = lambda _lowercase : x.reverse()
SCREAMING_SNAKE_CASE : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , [2, 1, 0] )
SCREAMING_SNAKE_CASE : str = SparkExamplesIterable(_lowercase ).shuffle_data_sources(_lowercase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : Union[str, Any] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
SCREAMING_SNAKE_CASE : int = SparkExamplesIterable(_lowercase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : str = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
SCREAMING_SNAKE_CASE : Any = SparkExamplesIterable(_lowercase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE : Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowercase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def A ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE : List[Any] = spark.range(100 ).repartition(1 )
SCREAMING_SNAKE_CASE : Dict = Spark(_lowercase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 34
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 1
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = BarthezTokenizer
UpperCamelCase_ = BarthezTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Dict ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Tuple = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokenizer
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '''<pad>'''
SCREAMING_SNAKE_CASE : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(UpperCamelCase__ ) , 10_1122 )
def __A ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE : List[str] = [0, 57, 3018, 7_0307, 91, 2]
SCREAMING_SNAKE_CASE : Any = self.tokenizer(
UpperCamelCase__ , max_length=len(UpperCamelCase__ ) , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : int = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE : List[str] = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=UpperCamelCase__ , )
| 34
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 1
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase__ :
@staticmethod
def __A ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase):
UpperCamelCase_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def __A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
SCREAMING_SNAKE_CASE : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def __A ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = vqa_pipeline(UpperCamelCase__ , top_k=1 )
self.assertEqual(
UpperCamelCase__ , [
[{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ )}],
[{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ )}],
] , )
@require_torch
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE : Optional[int] = '''How many cats are there?'''
SCREAMING_SNAKE_CASE : Tuple = vqa_pipeline(image=UpperCamelCase__ , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ )}, {'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ )}] )
SCREAMING_SNAKE_CASE : Any = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [{'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ )}, {'''score''': ANY(UpperCamelCase__ ), '''answer''': ANY(UpperCamelCase__ )}] )
@slow
@require_torch
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
SCREAMING_SNAKE_CASE : str = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''How many cats are there?'''
SCREAMING_SNAKE_CASE : Dict = vqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
SCREAMING_SNAKE_CASE : Optional[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
SCREAMING_SNAKE_CASE : Any = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def __A ( self : Tuple ):
'''simple docstring'''
pass
| 34
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
| 34
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase):
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
SCREAMING_SNAKE_CASE : List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(UpperCamelCase__ ) , UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , x.transpose() ) )
SCREAMING_SNAKE_CASE : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : str = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , transpose(UpperCamelCase__ ).numpy() ) )
SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , transpose(UpperCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , transpose(UpperCamelCase__ ).numpy() ) )
SCREAMING_SNAKE_CASE : Any = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Dict = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , transpose(UpperCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : List[str] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , np.asarray(transpose(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Any = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Any = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) ) ) )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , np.reshape(UpperCamelCase__ , (4, 3) ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , np.reshape(UpperCamelCase__ , (12, 5) ) ) )
@require_torch
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , reshape(UpperCamelCase__ , (4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE : int = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , reshape(UpperCamelCase__ , (12, 5) ).numpy() ) )
@require_tf
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , reshape(UpperCamelCase__ , (4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE : Any = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : Optional[int] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , reshape(UpperCamelCase__ , (12, 5) ).numpy() ) )
@require_flax
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , np.asarray(reshape(UpperCamelCase__ , (4, 3) ) ) ) )
SCREAMING_SNAKE_CASE : int = np.random.randn(3 , 4 , 5 )
SCREAMING_SNAKE_CASE : List[str] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , np.asarray(reshape(UpperCamelCase__ , (12, 5) ) ) ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , np.squeeze(UpperCamelCase__ ) ) )
SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , np.squeeze(UpperCamelCase__ , axis=2 ) ) )
@require_torch
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(1 , 3 , 4 )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , squeeze(UpperCamelCase__ ).numpy() ) )
SCREAMING_SNAKE_CASE : Dict = np.random.randn(1 , 4 , 1 , 5 )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , squeeze(UpperCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 3 , 4 )
SCREAMING_SNAKE_CASE : Dict = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , squeeze(UpperCamelCase__ ).numpy() ) )
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , squeeze(UpperCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = np.random.randn(1 , 3 , 4 )
SCREAMING_SNAKE_CASE : str = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , np.asarray(squeeze(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : str = np.random.randn(1 , 4 , 1 , 5 )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , np.asarray(squeeze(UpperCamelCase__ , axis=2 ) ) ) )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , np.expand_dims(UpperCamelCase__ , axis=1 ) ) )
@require_torch
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , expand_dims(UpperCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : str = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , expand_dims(UpperCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randn(3 , 4 )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , np.asarray(expand_dims(UpperCamelCase__ , axis=1 ) ) ) )
| 34
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Any = {'vocab_file': 'spiece.model'}
__UpperCamelCase : str = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__UpperCamelCase : List[Any] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__UpperCamelCase : Dict = '▁'
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : str=False , UpperCamelCase__ : Optional[int]="[CLS]" , UpperCamelCase__ : str="[SEP]" , UpperCamelCase__ : Optional[int]="<unk>" , UpperCamelCase__ : str="[SEP]" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[CLS]" , UpperCamelCase__ : List[str]="[MASK]" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ , normalized=UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else mask_token
)
SCREAMING_SNAKE_CASE : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = do_lower_case
SCREAMING_SNAKE_CASE : List[str] = remove_space
SCREAMING_SNAKE_CASE : Tuple = keep_accents
SCREAMING_SNAKE_CASE : Tuple = vocab_file
SCREAMING_SNAKE_CASE : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return len(self.sp_model )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Tuple = None
return state
def __setstate__( self : List[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self : Dict , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if self.remove_space:
SCREAMING_SNAKE_CASE : str = ''' '''.join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE : Optional[int] = inputs
SCREAMING_SNAKE_CASE : List[str] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
SCREAMING_SNAKE_CASE : Any = unicodedata.normalize('''NFKD''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.lower()
return outputs
def __A ( self : Any , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
SCREAMING_SNAKE_CASE : Any = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE : Tuple = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE : List[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def __A ( self : Dict , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return self.sp_model.PieceToId(UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCamelCase__ )
def __A ( self : List[Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : List[str] = ''''''
SCREAMING_SNAKE_CASE : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : List[str] = []
else:
current_sub_tokens.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = False
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : int = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 34
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
SCREAMING_SNAKE_CASE : Union[str, Any] = cha + cha
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : int = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """roformer"""
def __init__( self : Dict , UpperCamelCase__ : List[str]=5_0000 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Dict=1536 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : int=1E-12 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[Any]=True , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size if embedding_size is None else embedding_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = rotary_value
SCREAMING_SNAKE_CASE : List[str] = use_cache
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''sequence'''}
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 1
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__UpperCamelCase : int = 'src/transformers'
__UpperCamelCase : Optional[Any] = 'docs/source/en'
__UpperCamelCase : Union[str, Any] = '.'
def A ( _lowercase , _lowercase , _lowercase ):
with open(_lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE : Any = f.readlines()
# Find the start prompt.
SCREAMING_SNAKE_CASE : List[Any] = 0
while not lines[start_index].startswith(_lowercase ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE : Tuple = start_index
while not lines[end_index].startswith(_lowercase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__UpperCamelCase : Tuple = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
__UpperCamelCase : Union[str, Any] = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__UpperCamelCase : Dict = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__UpperCamelCase : Tuple = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
__UpperCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , _lowercase )
return [m.group(0 ) for m in matches]
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = 2 if text == '''✅''' or text == '''❌''' else len(_lowercase )
SCREAMING_SNAKE_CASE : Tuple = (width - text_length) // 2
SCREAMING_SNAKE_CASE : List[str] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def A ( ):
SCREAMING_SNAKE_CASE : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE : List[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
SCREAMING_SNAKE_CASE : Optional[int] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
SCREAMING_SNAKE_CASE : List[Any] = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : Any = collections.defaultdict(_lowercase )
SCREAMING_SNAKE_CASE : int = collections.defaultdict(_lowercase )
# Let's lookup through all transformers object (once).
for attr_name in dir(_lowercase ):
SCREAMING_SNAKE_CASE : Any = None
if attr_name.endswith('''Tokenizer''' ):
SCREAMING_SNAKE_CASE : Optional[int] = slow_tokenizers
SCREAMING_SNAKE_CASE : Any = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
SCREAMING_SNAKE_CASE : str = fast_tokenizers
SCREAMING_SNAKE_CASE : Union[str, Any] = attr_name[:-13]
elif _re_tf_models.match(_lowercase ) is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = tf_models
SCREAMING_SNAKE_CASE : int = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
SCREAMING_SNAKE_CASE : Any = flax_models
SCREAMING_SNAKE_CASE : str = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = pt_models
SCREAMING_SNAKE_CASE : Optional[Any] = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_name_to_prefix.values():
SCREAMING_SNAKE_CASE : Optional[Any] = True
break
# Try again after removing the last word in the name
SCREAMING_SNAKE_CASE : Dict = ''''''.join(camel_case_split(_lowercase )[:-1] )
# Let's build that table!
SCREAMING_SNAKE_CASE : Union[str, Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
SCREAMING_SNAKE_CASE : int = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
SCREAMING_SNAKE_CASE : Optional[Any] = [len(_lowercase ) + 2 for c in columns]
SCREAMING_SNAKE_CASE : str = max([len(_lowercase ) for name in model_names] ) + 2
# Build the table per se
SCREAMING_SNAKE_CASE : Any = '''|''' + '''|'''.join([_center_text(_lowercase , _lowercase ) for c, w in zip(_lowercase , _lowercase )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
SCREAMING_SNAKE_CASE : str = {True: '''✅''', False: '''❌'''}
for name in model_names:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_name_to_prefix[name]
SCREAMING_SNAKE_CASE : List[Any] = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_lowercase , _lowercase ) for l, w in zip(_lowercase , _lowercase )] ) + "|\n"
return table
def A ( _lowercase=False ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = _find_text_in_file(
filename=os.path.join(_lowercase , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
SCREAMING_SNAKE_CASE : Optional[int] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_lowercase , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__UpperCamelCase : str = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : Tuple = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = ['MaskFormerFeatureExtractor']
__UpperCamelCase : List[Any] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCamelCase : Union[str, Any] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 34
| 1
|
import copy
import re
class lowercase__ :
UpperCamelCase_ = """hp"""
UpperCamelCase_ = {}
UpperCamelCase_ = None
@classmethod
def __A ( cls : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix
SCREAMING_SNAKE_CASE : Union[str, Any] = defaults
cls.build_naming_info()
@staticmethod
def __A ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
if len(UpperCamelCase__ ) == 0:
return ""
SCREAMING_SNAKE_CASE : List[Any] = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(UpperCamelCase__ ) + 1 ):
SCREAMING_SNAKE_CASE : Optional[int] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE : Dict = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(UpperCamelCase__ : Any ):
SCREAMING_SNAKE_CASE : List[Any] = ''''''
while integer != 0:
SCREAMING_SNAKE_CASE : List[Any] = chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
SCREAMING_SNAKE_CASE : Tuple = 0
while True:
SCREAMING_SNAKE_CASE : int = word + '''#''' + int_to_alphabetic(UpperCamelCase__ )
if sword in info["reverse_short_word"]:
continue
else:
SCREAMING_SNAKE_CASE : str = sword
break
SCREAMING_SNAKE_CASE : Union[str, Any] = short_word
SCREAMING_SNAKE_CASE : Dict = word
return short_word
@staticmethod
def __A ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = param_name.split('''_''' )
SCREAMING_SNAKE_CASE : Dict = [TrialShortNamer.shortname_for_word(UpperCamelCase__ , UpperCamelCase__ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
SCREAMING_SNAKE_CASE : Optional[Any] = ['''''', '''_''']
for separator in separators:
SCREAMING_SNAKE_CASE : Optional[Any] = separator.join(UpperCamelCase__ )
if shortname not in info["reverse_short_param"]:
SCREAMING_SNAKE_CASE : Optional[int] = shortname
SCREAMING_SNAKE_CASE : Union[str, Any] = param_name
return shortname
return param_name
@staticmethod
def __A ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TrialShortNamer.shortname_for_key(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = short_name
SCREAMING_SNAKE_CASE : Tuple = param_name
@classmethod
def __A ( cls : List[Any] ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
SCREAMING_SNAKE_CASE : Dict = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
SCREAMING_SNAKE_CASE : Union[str, Any] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = info
@classmethod
def __A ( cls : str , UpperCamelCase__ : List[str] ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
SCREAMING_SNAKE_CASE : Any = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
SCREAMING_SNAKE_CASE : str = cls.NAMING_INFO['''short_param'''][k]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : int = 1 if v else 0
SCREAMING_SNAKE_CASE : List[Any] = '''''' if isinstance(UpperCamelCase__ , (int, float) ) else '''-'''
SCREAMING_SNAKE_CASE : str = f"""{key}{sep}{v}"""
name.append(UpperCamelCase__ )
return "_".join(UpperCamelCase__ )
@classmethod
def __A ( cls : List[Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
SCREAMING_SNAKE_CASE : Union[str, Any] = []
else:
SCREAMING_SNAKE_CASE : int = repr.split('''_''' )
SCREAMING_SNAKE_CASE : Optional[int] = {}
for value in values:
if "-" in value:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = value.split('''-''' )
else:
SCREAMING_SNAKE_CASE : Tuple = re.sub('''[0-9.]''' , '''''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = float(re.sub('''[^0-9.]''' , '''''' , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : int = cls.NAMING_INFO['''reverse_short_param'''][p_k]
SCREAMING_SNAKE_CASE : Optional[Any] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
SCREAMING_SNAKE_CASE : Tuple = cls.DEFAULTS[k]
return parameters
| 34
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCamelCase : Dict = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
while ask_again:
SCREAMING_SNAKE_CASE : Optional[Any] = input(_lowercase )
try:
if default is not None and len(_lowercase ) == 0:
return default
return convert_value(_lowercase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowercase )
def A ( _lowercase , _lowercase=[] , _lowercase=None , _lowercase=0 ):
SCREAMING_SNAKE_CASE : Dict = BulletMenu(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : str = menu.run(default_choice=_lowercase )
return convert_value(_lowercase ) if convert_value is not None else result
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = int(_lowercase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(_lowercase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Dict = int(_lowercase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A ( _lowercase ):
return {"yes": True, "no": False}[value.lower()]
class lowercase__ ( argparse.RawDescriptionHelpFormatter):
def __A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = super()._format_usage(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = usage.replace('''<command> [<args>] ''' , '''''' )
return usage
| 34
| 1
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""})
UpperCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Dict = CLIPTextModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor(control_image.shape , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def __A ( self : int ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : int ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __A ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = StableDiffusionControlNetImgaImgPipeline
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __A ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(UpperCamelCase__ : Optional[int] ):
if isinstance(UpperCamelCase__ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE : str = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCamelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(UpperCamelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : List[Any] = MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __A ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
SCREAMING_SNAKE_CASE : Any = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=UpperCamelCase__ , device=torch.device(UpperCamelCase__ ) , ),
]
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor(control_image[0].shape , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Any = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = 10.0
SCREAMING_SNAKE_CASE : Dict = 4
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = steps
SCREAMING_SNAKE_CASE : str = scale
SCREAMING_SNAKE_CASE : str = pipe(**UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = steps
SCREAMING_SNAKE_CASE : Tuple = scale
SCREAMING_SNAKE_CASE : Any = pipe(**UpperCamelCase__ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = steps
SCREAMING_SNAKE_CASE : List[Any] = scale
SCREAMING_SNAKE_CASE : Any = pipe(**UpperCamelCase__ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = steps
SCREAMING_SNAKE_CASE : int = scale
SCREAMING_SNAKE_CASE : str = pipe(**UpperCamelCase__ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __A ( self : List[str] ):
'''simple docstring'''
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : str ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __A ( self : Optional[int] ):
'''simple docstring'''
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(UpperCamelCase__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
SCREAMING_SNAKE_CASE : str = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=UpperCamelCase__ , controlnet=UpperCamelCase__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = '''evil space-punk bird'''
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE : List[str] = pipe(
UpperCamelCase__ , UpperCamelCase__ , control_image=UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
| 34
|
from __future__ import annotations
from typing import Any
class lowercase__ ( UpperCamelCase_):
pass
class lowercase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = data
SCREAMING_SNAKE_CASE : Node | None = None
def __iter__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self
SCREAMING_SNAKE_CASE : Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase__ )
yield node.data
SCREAMING_SNAKE_CASE : Dict = node.next_node
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__UpperCamelCase : List[Any] = Node(1)
__UpperCamelCase : str = Node(2)
__UpperCamelCase : Dict = Node(3)
__UpperCamelCase : List[Any] = Node(4)
print(root_node.has_loop) # False
__UpperCamelCase : int = root_node.next_node
print(root_node.has_loop) # True
__UpperCamelCase : Union[str, Any] = Node(5)
__UpperCamelCase : Union[str, Any] = Node(6)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : List[str] = Node(6)
print(root_node.has_loop) # False
__UpperCamelCase : List[Any] = Node(1)
print(root_node.has_loop) # False
| 34
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : Any , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 256, '''width''': 256}
SCREAMING_SNAKE_CASE : Dict = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : str = do_resize
SCREAMING_SNAKE_CASE : int = size
SCREAMING_SNAKE_CASE : Dict = resample
SCREAMING_SNAKE_CASE : str = do_center_crop
SCREAMING_SNAKE_CASE : Dict = crop_size
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE : List[str] = rescale_factor
SCREAMING_SNAKE_CASE : Tuple = do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
UpperCamelCase__ , size=(size['''height'''], size['''width''']) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(UpperCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int=None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : int = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : List[str] = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Dict = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : Optional[int] = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : int = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Tuple = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[Any] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : int = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 34
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = top_db
SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
SCREAMING_SNAKE_CASE : str = padding
SCREAMING_SNAKE_CASE : List[Any] = fft_window_size
SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1
SCREAMING_SNAKE_CASE : List[str] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = max_length_s
SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate
SCREAMING_SNAKE_CASE : List[Any] = sampling_rate
SCREAMING_SNAKE_CASE : List[str] = frequency_min
SCREAMING_SNAKE_CASE : Any = frequency_max
SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = spectrogram(
UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , )
return log_mel_spectrogram.T
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : int = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
SCREAMING_SNAKE_CASE : Any = [0]
# randomly choose index for each part
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] )
SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] )
SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] )
SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :]
SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate(
UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
SCREAMING_SNAKE_CASE : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length
SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 )
SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length]
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = False
else:
SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
SCREAMING_SNAKE_CASE : List[str] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation
SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
SCREAMING_SNAKE_CASE : int = [
self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ )
for waveform in raw_speech
]
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = []
for mel, longer in padded_inputs:
input_mel.append(UpperCamelCase__ )
is_longer.append(UpperCamelCase__ )
if truncation == "fusion" and sum(UpperCamelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = True
if isinstance(input_mel[0] , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer]
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer}
SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ )
if return_tensors is not None:
SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ )
return input_features
| 34
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_)
class lowercase__ ( UpperCamelCase_):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase_ = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase_ = Features({"""text""": Value("""string""")})
UpperCamelCase_ = Features({"""summary""": Value("""string""")})
UpperCamelCase_ = "text"
UpperCamelCase_ = "summary"
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any]=5_0265 , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=512 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : int=0 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[str]=1024 , UpperCamelCase__ : str=128 , UpperCamelCase__ : str=128 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : Dict=256 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Dict=224 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(
vocab_size=UpperCamelCase__ , hidden_size=UpperCamelCase__ , num_hidden_layers=UpperCamelCase__ , num_attention_heads=UpperCamelCase__ , intermediate_size=UpperCamelCase__ , hidden_act=UpperCamelCase__ , hidden_dropout_prob=UpperCamelCase__ , attention_probs_dropout_prob=UpperCamelCase__ , max_position_embeddings=UpperCamelCase__ , type_vocab_size=UpperCamelCase__ , initializer_range=UpperCamelCase__ , layer_norm_eps=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : List[str] = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : str = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : Union[str, Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Union[str, Any] = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Union[str, Any] = text_embed
SCREAMING_SNAKE_CASE : List[str] = visual_embed
SCREAMING_SNAKE_CASE : Optional[Any] = input_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.12""")
@property
def __A ( self : str ):
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __A ( self : int ):
'''simple docstring'''
return 1E-5
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Optional[Any] , UpperCamelCase__ : "ProcessorMixin" , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase__ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : Any = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(
processor(
UpperCamelCase__ , text=UpperCamelCase__ , boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , ) )
return inputs
| 34
| 1
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__UpperCamelCase : int = float('nan')
class lowercase__ :
def __init__( self : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = sys.stdout
SCREAMING_SNAKE_CASE : Dict = open(UpperCamelCase__ , '''a''' )
def __getattr__( self : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return getattr(self.stdout , UpperCamelCase__ )
def __A ( self : List[Any] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
self.stdout.write(UpperCamelCase__ )
# strip tqdm codes
self.file.write(re.sub(r'''^.*\r''' , '''''' , UpperCamelCase__ , 0 , re.M ) )
def A ( _lowercase=80 , _lowercase=False ):
SCREAMING_SNAKE_CASE : Union[str, Any] = []
# deal with critical env vars
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
SCREAMING_SNAKE_CASE : Optional[Any] = os.environ.get(_lowercase , _lowercase )
if val is not None:
cmd.append(f"""{key}={val}""" )
# python executable (not always needed if the script is executable)
SCREAMING_SNAKE_CASE : Tuple = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(_lowercase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Dict = ''''''
while len(_lowercase ) > 0:
current_line += f"""{cmd.pop(0 )} """
if len(_lowercase ) == 0 or len(_lowercase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_lowercase )
SCREAMING_SNAKE_CASE : Any = ''''''
return "\\\n".join(_lowercase )
def A ( _lowercase , _lowercase ):
# unwrap multi-line input
SCREAMING_SNAKE_CASE : Any = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += f""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
SCREAMING_SNAKE_CASE : Dict = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
SCREAMING_SNAKE_CASE : List[str] = subprocess.run(_lowercase , capture_output=_lowercase , text=_lowercase )
if verbose:
print('''STDOUT''' , result.stdout )
print('''STDERR''' , result.stderr )
# save the streams
SCREAMING_SNAKE_CASE : Union[str, Any] = variation.replace(''' ''' , '''-''' )
with open(Path(_lowercase ) / f"""log.{prefix}.stdout.txt""" , '''w''' ) as f:
f.write(result.stdout )
with open(Path(_lowercase ) / f"""log.{prefix}.stderr.txt""" , '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(f"""{output_dir}/all_results.json""" , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : str = json.load(_lowercase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Optional[Any] = f"""{id}: {variation:<{longest_variation_len}}"""
SCREAMING_SNAKE_CASE : List[Any] = f"""{preamble}: """
SCREAMING_SNAKE_CASE : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_lowercase ) , desc=_lowercase , leave=_lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = process_run_single(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE : Tuple = single_run_metrics[target_metric_key]
if not math.isnan(_lowercase ):
metrics.append(_lowercase )
results.append(_lowercase )
outcome += "✓"
else:
outcome += "✘"
SCREAMING_SNAKE_CASE : Dict = f"""\33[2K\r{outcome}"""
if len(_lowercase ) > 0:
SCREAMING_SNAKE_CASE : Optional[int] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
SCREAMING_SNAKE_CASE : List[Any] = round(mean_metrics[target_metric_key] , 2 )
SCREAMING_SNAKE_CASE : Optional[int] = f"""{outcome} {mean_target}"""
if len(_lowercase ) > 1:
results_str += f""" {tuple(round(_lowercase , 2 ) for x in results )}"""
print(_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = variation
return mean_metrics
else:
print(_lowercase )
return {variation_key: variation, target_metric_key: nan}
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return f"""
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = pd.DataFrame(_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = '''variation'''
SCREAMING_SNAKE_CASE : Dict = '''diff_%'''
SCREAMING_SNAKE_CASE : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
SCREAMING_SNAKE_CASE : Dict = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_lowercase ):
# as a fallback, use the minimal value as the sentinel
SCREAMING_SNAKE_CASE : Optional[Any] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = df.apply(
lambda _lowercase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='''columns''' , )
# re-order columns
SCREAMING_SNAKE_CASE : List[str] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
SCREAMING_SNAKE_CASE : Any = df.reindex(_lowercase , axis='''columns''' ) # reorder cols
# capitalize
SCREAMING_SNAKE_CASE : int = df.rename(str.capitalize , axis='''columns''' )
# make the cols as narrow as possible
SCREAMING_SNAKE_CASE : List[Any] = df.rename(lambda _lowercase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
SCREAMING_SNAKE_CASE : Optional[int] = df.rename(lambda _lowercase : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
SCREAMING_SNAKE_CASE : List[Any] = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_lowercase , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_lowercase , floatfmt='''.2f''' )]
print('''\n\n'''.join(_lowercase ) )
def A ( ):
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=_lowercase , type=_lowercase , nargs='''+''' , required=_lowercase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=_lowercase , type=_lowercase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=_lowercase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=_lowercase , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=_lowercase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=_lowercase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE : Optional[int] = args.output_dir
Path(_lowercase ).mkdir(exist_ok=_lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_base_command(_lowercase , _lowercase )
# split each dimension into its --foo variations
SCREAMING_SNAKE_CASE : str = [list(map(str.strip , re.split(R'''\|''' , _lowercase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
SCREAMING_SNAKE_CASE : Any = list(map(str.strip , map(''' '''.join , itertools.product(*_lowercase ) ) ) )
SCREAMING_SNAKE_CASE : List[Any] = max(len(_lowercase ) for x in variations )
# split wanted keys
SCREAMING_SNAKE_CASE : Union[str, Any] = args.report_metric_keys.split()
# capture prints into a log file for convenience
SCREAMING_SNAKE_CASE : Any = f"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"""
print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(f"""and this script's output is also piped into {report_fn}""" )
SCREAMING_SNAKE_CASE : Optional[int] = Tee(_lowercase )
print(f"""\n*** Running {len(_lowercase )} benchmarks:""" )
print(f"""Base command: {' '.join(_lowercase )}""" )
SCREAMING_SNAKE_CASE : int = '''variation'''
SCREAMING_SNAKE_CASE : List[Any] = []
for id, variation in enumerate(tqdm(_lowercase , desc='''Total completion: ''' , leave=_lowercase ) ):
SCREAMING_SNAKE_CASE : Any = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _lowercase , _lowercase , _lowercase , _lowercase , args.target_metric_key , _lowercase , args.repeat_times , _lowercase , args.verbose , ) )
process_results(_lowercase , args.target_metric_key , _lowercase , args.base_variation , _lowercase )
if __name__ == "__main__":
main()
| 34
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
| 34
| 1
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCamelCase : Any = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class lowercase__ ( unittest.TestCase):
@classmethod
def __A ( cls : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def __A ( cls : Optional[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase__ , repo_id='''test-config''' , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
def __A ( self : List[str] ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
SCREAMING_SNAKE_CASE : List[Any] = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=UpperCamelCase__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
SCREAMING_SNAKE_CASE : Union[str, Any] = c.n_embd + 1 # int
SCREAMING_SNAKE_CASE : Dict = c.resid_pdrop + 1.0 # float
SCREAMING_SNAKE_CASE : Optional[int] = not c.scale_attn_weights # bool
SCREAMING_SNAKE_CASE : int = c.summary_type + '''foo''' # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(UpperCamelCase__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(UpperCamelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(UpperCamelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(UpperCamelCase__ , c.summary_type , '''mismatch for key: summary_type''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = PretrainedConfig()
SCREAMING_SNAKE_CASE : Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
UpperCamelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
SCREAMING_SNAKE_CASE : Optional[int] = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCamelCase__ , UpperCamelCase__ )]
if len(UpperCamelCase__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f""" {', '.join(UpperCamelCase__ )}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaises(UpperCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(UpperCamelCase__ )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = mock.Mock()
SCREAMING_SNAKE_CASE : int = 500
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : Any = HTTPError
SCREAMING_SNAKE_CASE : Optional[Any] = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase__ ) as mock_head:
SCREAMING_SNAKE_CASE : str = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE : Tuple = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCamelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
SCREAMING_SNAKE_CASE : Any = ['''config.42.0.0.json''']
SCREAMING_SNAKE_CASE : Dict = 768
configuration.save_pretrained(UpperCamelCase__ )
shutil.move(os.path.join(UpperCamelCase__ , '''config.4.0.0.json''' ) , os.path.join(UpperCamelCase__ , '''config.42.0.0.json''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 768 )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCamelCase__ , return_unused_kwargs=UpperCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(UpperCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
SCREAMING_SNAKE_CASE : Optional[int] = '''v3.0.0'''
SCREAMING_SNAKE_CASE : Dict = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 768 )
| 34
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = loader
SCREAMING_SNAKE_CASE : List[Any] = infer
SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
def __len__( self : int ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( UpperCamelCase_):
def __init__( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __A ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class lowercase__ ( UpperCamelCase_):
def __iter__( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = processed
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : int = processed
SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = dataset
SCREAMING_SNAKE_CASE : Dict = key
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( UpperCamelCase_):
def __init__( self : List[Any] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : List[str] = keya
SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 34
| 1
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class lowercase__ ( UpperCamelCase_):
def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ):
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
if config is None:
assert isinstance(self.model , UpperCamelCase__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
SCREAMING_SNAKE_CASE : List[Any] = self.model.config
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = config
SCREAMING_SNAKE_CASE : List[Any] = data_args
SCREAMING_SNAKE_CASE : str = self.config.tgt_vocab_size if isinstance(self.config , UpperCamelCase__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
''' padding..''' )
if self.args.label_smoothing == 0:
SCREAMING_SNAKE_CASE : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
SCREAMING_SNAKE_CASE : List[str] = label_smoothed_nll_loss
def __A ( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
if self.optimizer is None:
SCREAMING_SNAKE_CASE : str = ['''bias''', '''LayerNorm.weight''']
SCREAMING_SNAKE_CASE : Optional[int] = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
SCREAMING_SNAKE_CASE : int = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
SCREAMING_SNAKE_CASE : str = Adafactor
SCREAMING_SNAKE_CASE : Optional[Any] = {'''scale_parameter''': False, '''relative_step''': False}
else:
SCREAMING_SNAKE_CASE : Optional[int] = AdamW
SCREAMING_SNAKE_CASE : Any = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
SCREAMING_SNAKE_CASE : Dict = self.args.learning_rate
if self.sharded_ddp:
SCREAMING_SNAKE_CASE : Any = OSS(
params=UpperCamelCase__ , optim=UpperCamelCase__ , **UpperCamelCase__ , )
else:
SCREAMING_SNAKE_CASE : Any = optimizer_cls(UpperCamelCase__ , **UpperCamelCase__ )
if self.lr_scheduler is None:
SCREAMING_SNAKE_CASE : List[Any] = self._get_lr_scheduler(UpperCamelCase__ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
SCREAMING_SNAKE_CASE : List[str] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
SCREAMING_SNAKE_CASE : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
SCREAMING_SNAKE_CASE : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCamelCase__ )
return scheduler
def __A ( self : Any ):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __A ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
SCREAMING_SNAKE_CASE : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : List[str] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = model(**UpperCamelCase__ , labels=UpperCamelCase__ , use_cache=UpperCamelCase__ )[:2]
else:
# compute label smoothed loss
SCREAMING_SNAKE_CASE : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : int = torch.nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.loss_fn(UpperCamelCase__ , UpperCamelCase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __A ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = inputs.pop('''labels''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return loss
def __A ( self : str , UpperCamelCase__ : nn.Module , UpperCamelCase__ : Dict[str, Union[torch.Tensor, Any]] , UpperCamelCase__ : bool , UpperCamelCase__ : Optional[List[str]] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
SCREAMING_SNAKE_CASE : Optional[Any] = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **UpperCamelCase__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE : Optional[Any] = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs['''max_length'''] )
SCREAMING_SNAKE_CASE : Tuple = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
SCREAMING_SNAKE_CASE : int = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE : Dict = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f""" padded to `max_length`={max_length}""" )
SCREAMING_SNAKE_CASE : List[Any] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
SCREAMING_SNAKE_CASE : Tuple = tensor
return padded_tensor
| 34
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 34
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase_ = Features({"""audio""": Audio()})
UpperCamelCase_ = Features({"""transcription""": Value("""string""")})
UpperCamelCase_ = "audio"
UpperCamelCase_ = "transcription"
def __A ( self : Tuple , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , UpperCamelCase__ ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(self )
SCREAMING_SNAKE_CASE : Any = self.input_schema.copy()
SCREAMING_SNAKE_CASE : List[str] = features[self.audio_column]
SCREAMING_SNAKE_CASE : str = input_schema
return task_template
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 34
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : Any = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE : Optional[int] = BitConfig(
conv_layer=_lowercase , num_labels=1_000 , idalabel=_lowercase , labelaid=_lowercase , )
return config
def A ( _lowercase ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : Tuple = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE : str = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''bit.encoder.''' + name
return name
def A ( ):
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def A ( _lowercase , _lowercase , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[Any] = get_config(_lowercase )
# load original model from timm
SCREAMING_SNAKE_CASE : Optional[Any] = create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = val.squeeze() if '''head''' in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE : str = BitForImageClassification(_lowercase )
model.eval()
model.load_state_dict(_lowercase )
# create image processor
SCREAMING_SNAKE_CASE : Optional[Any] = create_transform(**resolve_data_config({} , model=_lowercase ) )
SCREAMING_SNAKE_CASE : List[str] = transform.transforms
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE : Tuple = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowercase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=_lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE : Any = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = transform(_lowercase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : Optional[int] = processor(_lowercase , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(_lowercase , _lowercase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE : List[Any] = timm_model(_lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowercase , outputs.logits , atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
__UpperCamelCase : Any = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
__UpperCamelCase : Tuple = logging.WARNING
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = os.getenv('''DATASETS_VERBOSITY''' , _lowercase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def A ( ):
return __name__.split('''.''' )[0]
def A ( ):
return logging.getLogger(_get_library_name() )
def A ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE : Any = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def A ( _lowercase = None ):
if name is None:
SCREAMING_SNAKE_CASE : Tuple = _get_library_name()
return logging.getLogger(_lowercase )
def A ( ):
return _get_library_root_logger().getEffectiveLevel()
def A ( _lowercase ):
_get_library_root_logger().setLevel(_lowercase )
def A ( ):
return set_verbosity(_lowercase )
def A ( ):
return set_verbosity(_lowercase )
def A ( ):
return set_verbosity(_lowercase )
def A ( ):
return set_verbosity(_lowercase )
def A ( ):
SCREAMING_SNAKE_CASE : Any = False
def A ( ):
SCREAMING_SNAKE_CASE : Union[str, Any] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowercase__ :
def __init__( self : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : int ): # pylint: disable=unused-argument
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = args[0] if args else None
def __iter__( self : Any ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
def empty_fn(*UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[Any] ):
'''simple docstring'''
return self
def __exit__( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return
__UpperCamelCase : Optional[int] = True
class lowercase__ :
def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str=False , **UpperCamelCase__ : str ):
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*UpperCamelCase__ , **UpperCamelCase__ )
else:
return EmptyTqdm(*UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Tuple , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__UpperCamelCase : Union[str, Any] = _tqdm_cls()
def A ( ):
global _tqdm_active
return bool(_tqdm_active )
def A ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE : Dict = True
def A ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE : Union[str, Any] = False
| 34
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCamelCase : str = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase : int = logging.getLogger()
def A ( ):
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
return args.f
def A ( _lowercase , _lowercase="eval" ):
SCREAMING_SNAKE_CASE : Dict = os.path.join(_lowercase , f"""{split}_results.json""" )
if os.path.exists(_lowercase ):
with open(_lowercase , '''r''' ) as f:
return json.load(_lowercase )
raise ValueError(f"""can't find {path}""" )
__UpperCamelCase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase__ ( UpperCamelCase_):
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Tuple = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : str = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE : Dict = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 100 )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE : Union[str, Any] = get_results(UpperCamelCase__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE : List[Any] = get_results(UpperCamelCase__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE : Optional[int] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Any = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE : List[str] = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Union[str, Any] = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_qa.main()
SCREAMING_SNAKE_CASE : str = get_results(UpperCamelCase__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 34
| 1
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowercase__ ( unittest.TestCase):
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCamelCase__ ) for s in shape] )}.npy"""
def __A ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
def __A ( self : Dict , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Union[str, Any]=(4, 4, 64, 64) , UpperCamelCase__ : Optional[Any]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : Any = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return image
def __A ( self : Optional[int] , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[Any]="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : Any = '''bf16''' if fpaa else None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase__ , subfolder='''unet''' , dtype=UpperCamelCase__ , revision=UpperCamelCase__ )
return model, params
def __A ( self : List[Any] , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Dict=(4, 77, 768) , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : List[Any] = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = self.get_latents(UpperCamelCase__ , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_encoder_hidden_states(UpperCamelCase__ , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = model.apply(
{'''params''': params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : str = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_latents(UpperCamelCase__ , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_encoder_hidden_states(UpperCamelCase__ , shape=(4, 77, 1024) , fpaa=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model.apply(
{'''params''': params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-2 )
| 34
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCamelCase : Dict = random.Random()
def A ( _lowercase , _lowercase=1.0 , _lowercase=None , _lowercase=None ):
if rng is None:
SCREAMING_SNAKE_CASE : Any = global_rng
SCREAMING_SNAKE_CASE : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Any=400 , UpperCamelCase__ : List[str]=2000 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : str=30 , UpperCamelCase__ : Tuple=4_4100 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : str = min_seq_length
SCREAMING_SNAKE_CASE : Dict = max_seq_length
SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE : Optional[Any] = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[int] = feature_size
SCREAMING_SNAKE_CASE : Tuple = num_audio_channels
SCREAMING_SNAKE_CASE : Union[str, Any] = hop_length
SCREAMING_SNAKE_CASE : List[Any] = chunk_length
SCREAMING_SNAKE_CASE : str = sampling_rate
def __A ( self : Optional[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __A ( self : Tuple , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Optional[int]=False ):
'''simple docstring'''
def _flatten(UpperCamelCase__ : str ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
SCREAMING_SNAKE_CASE : List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE : Optional[Any] = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = TvltFeatureExtractor
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TvltFeatureExtractionTester(self )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) )
self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Optional[int] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(UpperCamelCase__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE : str = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE : int = dict_first.pop('''mel_filters''' )
SCREAMING_SNAKE_CASE : Any = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
SCREAMING_SNAKE_CASE : List[str] = feature_extractor(
UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE : Dict = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __A ( self : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE : Dict = ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE : int = TvltFeatureExtractor()
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
SCREAMING_SNAKE_CASE : str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
| 34
| 1
|
import numpy as np
def A ( _lowercase ):
return 1 / (1 + np.exp(-vector ))
def A ( _lowercase ):
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = 1
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1000 , UpperCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE : Tuple = 4
# running values
SCREAMING_SNAKE_CASE : int = []
def __A ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = num_inference_steps
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE : Tuple = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE : int = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE : Dict = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE : List[str] = timesteps.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = []
def __A ( self : Tuple , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
SCREAMING_SNAKE_CASE : Optional[int] = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE : Union[str, Any] = timestep_index + 1
SCREAMING_SNAKE_CASE : int = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE : Dict = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE : Optional[int] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE : str = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE : Optional[Any] = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE : Optional[int] = self._get_prev_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : torch.FloatTensor , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return sample
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE : List[str] = self.betas[timestep_index]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Tuple = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE : Dict = (sample - sigma * ets) / max(UpperCamelCase__ , 1E-8 )
SCREAMING_SNAKE_CASE : Optional[Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : int ):
'''simple docstring'''
return self.config.num_train_timesteps
| 34
| 1
|
import re
import string
import numpy as np
import datasets
__UpperCamelCase : List[str] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__UpperCamelCase : Optional[Any] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__UpperCamelCase : Union[str, Any] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase__ ( datasets.Metric):
def __A ( self : Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def __A ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Union[str, Any]=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
SCREAMING_SNAKE_CASE : int = np.array([re.sub(UpperCamelCase__ , '''''' , UpperCamelCase__ ) for x in predictions] )
SCREAMING_SNAKE_CASE : List[Any] = np.array([re.sub(UpperCamelCase__ , '''''' , UpperCamelCase__ ) for x in references] )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = np.asarray(UpperCamelCase__ )
if ignore_case:
SCREAMING_SNAKE_CASE : Dict = np.char.lower(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = np.char.lower(UpperCamelCase__ )
if ignore_punctuation:
SCREAMING_SNAKE_CASE : Optional[int] = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
SCREAMING_SNAKE_CASE : List[str] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
if ignore_numbers:
SCREAMING_SNAKE_CASE : Any = string.digits.maketrans('''''' , '''''' , string.digits )
SCREAMING_SNAKE_CASE : Tuple = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = predictions == references
return {"exact_match": np.mean(UpperCamelCase__ ) * 100}
| 34
|
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = IFPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __A ( self : Tuple ):
'''simple docstring'''
return self._get_dummy_components()
def __A ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __A ( self : Any ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __A ( self : List[Any] ):
'''simple docstring'''
self._test_save_load_local()
def __A ( self : List[str] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __A ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE : Tuple = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE : Optional[int] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def A ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 34
| 1
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__UpperCamelCase : Dict = logging.get_logger(__name__)
class lowercase__ ( UpperCamelCase_):
def __init__( self : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 34
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase ):
def constraint_to_multiple_of(_lowercase , _lowercase , _lowercase=0 , _lowercase=None ):
SCREAMING_SNAKE_CASE : int = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE : Dict = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE : Optional[Any] = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE : Optional[Any] = (output_size, output_size) if isinstance(_lowercase , _lowercase ) else output_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = get_image_size(_lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE : Dict = output_height / input_height
SCREAMING_SNAKE_CASE : Optional[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE : List[Any] = scale_height
SCREAMING_SNAKE_CASE : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = constraint_to_multiple_of(scale_width * input_width , multiple=_lowercase )
return (new_height, new_width)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 34
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Optional[Any] = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__UpperCamelCase : Optional[int] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__UpperCamelCase : Any = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__UpperCamelCase : List[str] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
|
import random
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = [], [], []
for element in data:
if element < pivot:
less.append(_lowercase )
elif element > pivot:
greater.append(_lowercase )
else:
equal.append(_lowercase )
return less, equal, greater
def A ( _lowercase , _lowercase ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowercase ) or index < 0:
return None
SCREAMING_SNAKE_CASE : Dict = items[random.randint(0 , len(_lowercase ) - 1 )]
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = _partition(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = len(_lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowercase , _lowercase )
# must be in larger
else:
return quick_select(_lowercase , index - (m + count) )
| 34
| 1
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = CTRLTokenizer
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : Any ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Optional[Any] = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
SCREAMING_SNAKE_CASE : Any = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE : Dict = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def __A ( self : List[str] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''adapt react readapt apt'''
return input_text, output_text
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Any = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE : Optional[int] = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
| 34
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
# TODO Update this
__UpperCamelCase : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """esm"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : str=12 , UpperCamelCase__ : Optional[int]=3072 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=1026 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=None , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = position_embedding_type
SCREAMING_SNAKE_CASE : Any = use_cache
SCREAMING_SNAKE_CASE : Dict = emb_layer_norm_before
SCREAMING_SNAKE_CASE : List[str] = token_dropout
SCREAMING_SNAKE_CASE : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
SCREAMING_SNAKE_CASE : List[Any] = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmFoldConfig(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
SCREAMING_SNAKE_CASE : Optional[int] = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_list
else:
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCamelCase__ ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = None
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = 0
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.trunk is None:
SCREAMING_SNAKE_CASE : Optional[Any] = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Tuple = TrunkConfig(**self.trunk )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = asdict(self )
SCREAMING_SNAKE_CASE : Tuple = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 48
UpperCamelCase_ = 1_024
UpperCamelCase_ = 128
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 32
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = False
UpperCamelCase_ = 4
UpperCamelCase_ = 128
UpperCamelCase_ = None
def __A ( self : Any ):
'''simple docstring'''
if self.structure_module is None:
SCREAMING_SNAKE_CASE : Optional[int] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
SCREAMING_SNAKE_CASE : Dict = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE : Tuple = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = asdict(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
UpperCamelCase_ = 384
UpperCamelCase_ = 128
UpperCamelCase_ = 16
UpperCamelCase_ = 128
UpperCamelCase_ = 12
UpperCamelCase_ = 4
UpperCamelCase_ = 8
UpperCamelCase_ = 0.1
UpperCamelCase_ = 8
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 7
UpperCamelCase_ = 10
UpperCamelCase_ = 1E-8
UpperCamelCase_ = 1E5
def __A ( self : Dict ):
'''simple docstring'''
return asdict(self )
def A ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 34
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_):
UpperCamelCase_ = """swin"""
UpperCamelCase_ = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , UpperCamelCase__ : Optional[int]=224 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Any=96 , UpperCamelCase__ : Union[str, Any]=[2, 2, 6, 2] , UpperCamelCase__ : Any=[3, 6, 12, 24] , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Optional[Any]=4.0 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Tuple=1E-5 , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = image_size
SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = embed_dim
SCREAMING_SNAKE_CASE : Dict = depths
SCREAMING_SNAKE_CASE : List[str] = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = num_heads
SCREAMING_SNAKE_CASE : int = window_size
SCREAMING_SNAKE_CASE : Optional[int] = mlp_ratio
SCREAMING_SNAKE_CASE : Dict = qkv_bias
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : int = drop_path_rate
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : str = use_absolute_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : List[Any] = int(embed_dim * 2 ** (len(UpperCamelCase__ ) - 1) )
SCREAMING_SNAKE_CASE : int = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = version.parse("""1.11""")
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __A ( self : Optional[int] ):
'''simple docstring'''
return 1E-4
| 34
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE : Any = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE : Optional[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE : List[str] = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE : List[str] = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE : Any = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE : str = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE : Optional[Any] = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE : Tuple = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE : Any = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE : List[Any] = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 128
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Optional[int] = 192
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 256
elif "huge" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 352
# set label information
SCREAMING_SNAKE_CASE : Dict = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE : Any = '''imagenet-22k-id2label.json'''
else:
SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[Any] = FocalNetConfig(
embed_dim=_lowercase , depths=_lowercase , focal_levels=_lowercase , focal_windows=_lowercase , use_conv_embed=_lowercase , idalabel=_lowercase , labelaid=_lowercase , use_post_layernorm=_lowercase , use_layerscale=_lowercase , )
return config
def A ( _lowercase ):
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE : int = '''encoder.''' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE : int = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE : List[str] = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE : Tuple = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE : str = name.replace('''head''' , '''classifier''' )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = '''focalnet.''' + name
return name
def A ( _lowercase , _lowercase , _lowercase=False ):
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
SCREAMING_SNAKE_CASE : Optional[Any] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , _lowercase )
SCREAMING_SNAKE_CASE : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(_lowercase )
SCREAMING_SNAKE_CASE : Any = val
SCREAMING_SNAKE_CASE : Any = get_focalnet_config(_lowercase )
SCREAMING_SNAKE_CASE : Dict = FocalNetForImageClassification(_lowercase )
model.eval()
# load state dict
model.load_state_dict(_lowercase )
# verify conversion
SCREAMING_SNAKE_CASE : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : str = BitImageProcessor(
do_resize=_lowercase , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowercase , crop_size=224 , do_normalize=_lowercase , image_mean=_lowercase , image_std=_lowercase , )
SCREAMING_SNAKE_CASE : str = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
SCREAMING_SNAKE_CASE : str = processor(images=_lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE : List[Any] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
SCREAMING_SNAKE_CASE : Optional[int] = image_transforms(_lowercase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _lowercase , atol=1e-4 )
SCREAMING_SNAKE_CASE : Dict = model(**_lowercase )
SCREAMING_SNAKE_CASE : Tuple = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__UpperCamelCase : Dict = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 34
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
__UpperCamelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
__UpperCamelCase : Optional[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 34
| 1
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def A ( _lowercase ):
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
SCREAMING_SNAKE_CASE : List[str] = precision
SCREAMING_SNAKE_CASE : str = ceil(precision / 14 )
SCREAMING_SNAKE_CASE : Tuple = 426_880 * Decimal(10_005 ).sqrt()
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : int = 13_591_409
SCREAMING_SNAKE_CASE : Optional[int] = Decimal(_lowercase )
for k in range(1 , _lowercase ):
SCREAMING_SNAKE_CASE : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowercase ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__UpperCamelCase : List[str] = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 34
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : List[str] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : int = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : str = vocab_file
SCREAMING_SNAKE_CASE : int = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : str = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Any = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : Tuple = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : Any , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[Any] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
| 34
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = '▁'
__UpperCamelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__UpperCamelCase : Tuple = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__UpperCamelCase : Tuple = {'vinai/bartpho-syllable': 1024}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : Tuple="<mask>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
SCREAMING_SNAKE_CASE : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE : Any = monolingual_vocab_file
SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : List[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase__ ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE : str = cnt
cnt += 1
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE : Union[str, Any] = line.strip().split()[0]
SCREAMING_SNAKE_CASE : int = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase__ ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE : Any = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def __A ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self : int ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __A ( self : str , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __A ( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ''''''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ''' ''' ).strip()
return out_string
def __A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(UpperCamelCase__ )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 34
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : str = False
class lowercase__ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = pipe.dual_guided(
prompt='''first prompt''' , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.dual_guided(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , text_to_image_strength=0.75 , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.text_to_image(
prompt=UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Any = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(UpperCamelCase__ , generator=UpperCamelCase__ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 34
| 1
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__UpperCamelCase : Dict = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__UpperCamelCase : str = [0, 25, 50]
__UpperCamelCase : int = [25, 50, 75]
__UpperCamelCase : int = fuzz.membership.trimf(X, abca)
__UpperCamelCase : str = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__UpperCamelCase : Tuple = np.ones(75)
__UpperCamelCase : Optional[int] = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase : str = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase : Tuple = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__UpperCamelCase : int = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__UpperCamelCase : List[Any] = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__UpperCamelCase : Union[str, Any] = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase : Optional[int] = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__UpperCamelCase : Any = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__UpperCamelCase : List[str] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 34
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def A ( _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = analyze_text(_lowercase )
SCREAMING_SNAKE_CASE : Any = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
SCREAMING_SNAKE_CASE : Tuple = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE : Tuple = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE : Tuple = single_char_strings[ch]
SCREAMING_SNAKE_CASE : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(_lowercase ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
SCREAMING_SNAKE_CASE : Optional[Any] = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE : List[str] = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
SCREAMING_SNAKE_CASE : Union[str, Any] = cha + cha
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE : Any = two_char_strings[sequence]
SCREAMING_SNAKE_CASE : Dict = int(_lowercase ) / all_sum
my_sec_sum += prob * math.loga(_lowercase )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = Counter() # type: ignore
SCREAMING_SNAKE_CASE : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def A ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 34
| 1
|
from __future__ import annotations
__UpperCamelCase : Union[str, Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
SCREAMING_SNAKE_CASE : Any = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the reference grid
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : int = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowercase ) )
] # the action grid
SCREAMING_SNAKE_CASE : Any = init[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = init[1]
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE : Any = [[f, g, x, y]]
SCREAMING_SNAKE_CASE : List[Any] = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE : Tuple = False # flag set if we can't find expand
while not found and not resign:
if len(_lowercase ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE : List[str] = cell.pop()
SCREAMING_SNAKE_CASE : Tuple = next_cell[2]
SCREAMING_SNAKE_CASE : Dict = next_cell[3]
SCREAMING_SNAKE_CASE : List[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE : Tuple = True
else:
for i in range(len(_lowercase ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE : Union[str, Any] = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowercase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = g + cost
SCREAMING_SNAKE_CASE : Any = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE : Any = 1
SCREAMING_SNAKE_CASE : Optional[int] = i
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[str] = goal[0]
SCREAMING_SNAKE_CASE : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE : Dict = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE : List[str] = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE : Optional[Any] = xa
SCREAMING_SNAKE_CASE : Dict = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE : List[str] = []
for i in range(len(_lowercase ) ):
path.append(invpath[len(_lowercase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__UpperCamelCase : List[str] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCamelCase : Union[str, Any] = [0, 0]
# all coordinates are given in format [y,x]
__UpperCamelCase : Optional[Any] = [len(grid) - 1, len(grid[0]) - 1]
__UpperCamelCase : Union[str, Any] = 1
# the cost map which pushes the path closer to the goal
__UpperCamelCase : List[Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCamelCase : Union[str, Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCamelCase : str = 99
__UpperCamelCase , __UpperCamelCase : List[Any] = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 34
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Tuple = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.